diff --git a/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml b/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml
index cad763287ea9..adcbcb15681a 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-tests-functional.yml
@@ -1,81 +1,83 @@
name: zfs-tests-functional
on:
push:
pull_request:
jobs:
tests-functional-ubuntu:
strategy:
fail-fast: false
matrix:
os: [18.04, 20.04]
runs-on: ubuntu-${{ matrix.os }}
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install --yes -qq build-essential autoconf libtool gdb lcov \
git alien fakeroot wget curl bc fio acl \
sysstat mdadm lsscsi parted gdebi attr dbench watchdog ksh \
nfs-kernel-server samba rng-tools xz-utils \
zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
libpam0g-dev pamtester python-dev python-setuptools python-cffi \
- python3 python3-dev python3-setuptools python3-cffi python3-packaging
+ python3 python3-dev python3-setuptools python3-cffi python3-packaging \
+ libcurl4-openssl-dev
- name: Autogen.sh
run: |
sh autogen.sh
- name: Configure
run: |
./configure --enable-debug --enable-debuginfo
- name: Make
run: |
make --no-print-directory -s pkg-utils pkg-kmod
- name: Install
run: |
sudo dpkg -i *.deb
# Update order of directories to search for modules, otherwise
# Ubuntu will load kernel-shipped ones.
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo depmod
sudo modprobe zfs
# Workaround for cloud-init bug
# see https://github.com/openzfs/zfs/issues/12644
FILE=/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
if [ -r "${FILE}" ]; then
HASH=$(md5sum "${FILE}" | awk '{ print $1 }')
if [ "${HASH}" = "121ff0ef1936cd2ef65aec0458a35772" ]; then
# Just shove a zd* exclusion right above the hotplug hook...
sudo sed -i -e s/'LABEL="cloudinit_hook"'/'KERNEL=="zd*", GOTO="cloudinit_end"\n&'/ "${FILE}"
sudo udevadm control --reload-rules
fi
fi
# Workaround to provide additional free space for testing.
# https://github.com/actions/virtual-environments/issues/2840
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Tests
run: |
/usr/share/zfs/zfs-tests.sh -vR -s 3G
+ timeout-minutes: 330
- name: Prepare artifacts
if: failure()
run: |
RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
sudo dmesg > $RESULTS_PATH/dmesg
sudo cp /var/log/syslog $RESULTS_PATH/
sudo chmod +r $RESULTS_PATH/*
# Replace ':' in dir names, actions/upload-artifact doesn't support it
for f in $(find /var/tmp/test_results -name '*:*'); do mv "$f" "${f//:/__}"; done
- uses: actions/upload-artifact@v2
if: failure()
with:
name: Test logs Ubuntu-${{ matrix.os }}
path: /var/tmp/test_results/20*/
if-no-files-found: ignore
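The Install step above (repeated in the sanity workflow that follows) rewrites depmod's search order so the freshly packaged zfs.ko shadows Ubuntu's kernel-shipped module, then patches a cloud-init hotplug rule and frees runner disk space. A hedged sketch of how the module-ordering fix can be verified on a local Ubuntu machine; the modinfo and sysfs checks are illustrative additions, not part of the workflow, and the extra/ path assumes the packages install their modules there:
# Not part of the patch: check which zfs.ko depmod now prefers.
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo depmod
modinfo -F filename zfs      # typically points at .../extra/zfs.ko rather than the in-kernel copy
sudo modprobe zfs
cat /sys/module/zfs/version  # version string of the module that actually loaded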
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml b/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml
index 78187212bb26..c1e257dd1572 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-tests-sanity.yml
@@ -1,77 +1,79 @@
name: zfs-tests-sanity
on:
push:
pull_request:
jobs:
tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install --yes -qq build-essential autoconf libtool gdb lcov \
git alien fakeroot wget curl bc fio acl \
sysstat mdadm lsscsi parted gdebi attr dbench watchdog ksh \
nfs-kernel-server samba rng-tools xz-utils \
zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
libpam0g-dev pamtester python-dev python-setuptools python-cffi \
- python3 python3-dev python3-setuptools python3-cffi python3-packaging
+ python3 python3-dev python3-setuptools python3-cffi python3-packaging \
+ libcurl4-openssl-dev
- name: Autogen.sh
run: |
sh autogen.sh
- name: Configure
run: |
./configure --enable-debug --enable-debuginfo
- name: Make
run: |
make --no-print-directory -s pkg-utils pkg-kmod
- name: Install
run: |
sudo dpkg -i *.deb
# Update order of directories to search for modules, otherwise
# Ubuntu will load kernel-shipped ones.
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
sudo depmod
sudo modprobe zfs
# Workaround for cloud-init bug
# see https://github.com/openzfs/zfs/issues/12644
FILE=/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
if [ -r "${FILE}" ]; then
HASH=$(md5sum "${FILE}" | awk '{ print $1 }')
if [ "${HASH}" = "121ff0ef1936cd2ef65aec0458a35772" ]; then
# Just shove a zd* exclusion right above the hotplug hook...
sudo sed -i -e s/'LABEL="cloudinit_hook"'/'KERNEL=="zd*", GOTO="cloudinit_end"\n&'/ "${FILE}"
sudo udevadm control --reload-rules
fi
fi
# Workaround to provide additional free space for testing.
# https://github.com/actions/virtual-environments/issues/2840
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Tests
run: |
/usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity
+ timeout-minutes: 330
- name: Prepare artifacts
if: failure()
run: |
RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
sudo dmesg > $RESULTS_PATH/dmesg
sudo cp /var/log/syslog $RESULTS_PATH/
sudo chmod +r $RESULTS_PATH/*
# Replace ':' in dir names, actions/upload-artifact doesn't support it
for f in $(find /var/tmp/test_results -name '*:*'); do mv "$f" "${f//:/__}"; done
- uses: actions/upload-artifact@v2
if: failure()
with:
name: Test logs
path: /var/tmp/test_results/20*/
if-no-files-found: ignore
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 10130517955a..0437224b403f 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
-Version: 2.1.2
+Version: 2.1.3
Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 5.15
+Linux-Maximum: 5.16
Linux-Minimum: 3.10
diff --git a/sys/contrib/openzfs/README.md b/sys/contrib/openzfs/README.md
index d666df7af309..331889560950 100644
--- a/sys/contrib/openzfs/README.md
+++ b/sys/contrib/openzfs/README.md
@@ -1,35 +1,35 @@
![img](https://openzfs.github.io/openzfs-docs/_static/img/logo/480px-Open-ZFS-Secondary-Logo-Colour-halfsize.png)
OpenZFS is an advanced file system and volume manager which was originally
developed for Solaris and is now maintained by the OpenZFS community.
This repository contains the code for running OpenZFS on Linux and FreeBSD.
[![codecov](https://codecov.io/gh/openzfs/zfs/branch/master/graph/badge.svg)](https://codecov.io/gh/openzfs/zfs)
[![coverity](https://scan.coverity.com/projects/1973/badge.svg)](https://scan.coverity.com/projects/openzfs-zfs)
# Official Resources
* [Documentation](https://openzfs.github.io/openzfs-docs/) - for using and developing this repo
* [ZoL Site](https://zfsonlinux.org) - Linux release info & links
* [Mailing lists](https://openzfs.github.io/openzfs-docs/Project%20and%20Community/Mailing%20Lists.html)
- * [OpenZFS site](http://open-zfs.org/) - for conference videos and info on other platforms (illumos, OSX, Windows, etc)
+ * [OpenZFS site](https://openzfs.org/) - for conference videos and info on other platforms (illumos, OSX, Windows, etc)
# Installation
Full documentation for installing OpenZFS on your favorite operating system can
be found at the [Getting Started Page](https://openzfs.github.io/openzfs-docs/Getting%20Started/index.html).
# Contribute & Develop
We have a separate document with [contribution guidelines](./.github/CONTRIBUTING.md).
We have a [Code of Conduct](./CODE_OF_CONDUCT.md).
# Release
OpenZFS is released under a CDDL license.
For more details see the NOTICE, LICENSE and COPYRIGHT files; `UCRL-CODE-235197`
# Supported Kernels
* The `META` file contains the officially recognized supported Linux kernel versions.
* Supported FreeBSD versions are any supported branches and releases starting from 12.2-RELEASE.
diff --git a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
index de3833698a2b..55968ac88ffa 100644
--- a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
+++ b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
@@ -1,388 +1,409 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Lawrence Livermore National Security, LLC.
*/
#include <libintl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/stat.h>
#include <libzfs.h>
#include <libzutil.h>
#include <locale.h>
#include <getopt.h>
#include <fcntl.h>
#include <errno.h>
#define ZS_COMMENT 0x00000000 /* comment */
#define ZS_ZFSUTIL 0x00000001 /* caller is zfs(8) */
libzfs_handle_t *g_zfs;
/*
* Opportunistically convert a target string into a pool name. If the
* string does not represent a block device with a valid zfs label
* then it is passed through without modification.
*/
static void
parse_dataset(const char *target, char **dataset)
{
/*
* Prior to util-linux 2.36.2, if a file or directory in the
* current working directory was named 'dataset' then mount(8)
* would prepend the current working directory to the dataset.
* Check for it and strip the prepended path when it is added.
*/
char cwd[PATH_MAX];
if (getcwd(cwd, PATH_MAX) == NULL) {
perror("getcwd");
return;
}
int len = strlen(cwd);
if (strncmp(cwd, target, len) == 0)
target += len;
/* Assume pool/dataset is more likely */
strlcpy(*dataset, target, PATH_MAX);
int fd = open(target, O_RDONLY | O_CLOEXEC);
if (fd < 0)
return;
nvlist_t *cfg = NULL;
if (zpool_read_label(fd, &cfg, NULL) == 0) {
char *nm = NULL;
if (!nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &nm))
strlcpy(*dataset, nm, PATH_MAX);
nvlist_free(cfg);
}
if (close(fd))
perror("close");
}
/*
* Update the mtab_* code to use the libmount library when it is commonly
* available; otherwise fall back to legacy mode. The mount(8) utility will
* manage the lock file for us to prevent racing updates to /etc/mtab.
*/
static int
mtab_is_writeable(void)
{
struct stat st;
int error, fd;
error = lstat("/etc/mtab", &st);
if (error || S_ISLNK(st.st_mode))
return (0);
fd = open("/etc/mtab", O_RDWR | O_CREAT, 0644);
if (fd < 0)
return (0);
close(fd);
return (1);
}
static int
mtab_update(char *dataset, char *mntpoint, char *type, char *mntopts)
{
struct mntent mnt;
FILE *fp;
int error;
mnt.mnt_fsname = dataset;
mnt.mnt_dir = mntpoint;
mnt.mnt_type = type;
mnt.mnt_opts = mntopts ? mntopts : "";
mnt.mnt_freq = 0;
mnt.mnt_passno = 0;
fp = setmntent("/etc/mtab", "a+");
if (!fp) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
"could not be opened due to error: %s\n"),
dataset, strerror(errno));
return (MOUNT_FILEIO);
}
error = addmntent(fp, &mnt);
if (error) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
"could not be updated due to error: %s\n"),
dataset, strerror(errno));
return (MOUNT_FILEIO);
}
(void) endmntent(fp);
return (MOUNT_SUCCESS);
}
int
main(int argc, char **argv)
{
zfs_handle_t *zhp;
char prop[ZFS_MAXPROPLEN];
uint64_t zfs_version = 0;
char mntopts[MNT_LINE_MAX] = { '\0' };
char badopt[MNT_LINE_MAX] = { '\0' };
char mtabopt[MNT_LINE_MAX] = { '\0' };
char mntpoint[PATH_MAX];
char dataset[PATH_MAX], *pdataset = dataset;
unsigned long mntflags = 0, zfsflags = 0, remount = 0;
int sloppy = 0, fake = 0, verbose = 0, nomtab = 0, zfsutil = 0;
int error, c;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/* check options */
while ((c = getopt_long(argc, argv, "sfnvo:h?", 0, 0)) != -1) {
switch (c) {
case 's':
sloppy = 1;
break;
case 'f':
fake = 1;
break;
case 'n':
nomtab = 1;
break;
case 'v':
verbose++;
break;
case 'o':
(void) strlcpy(mntopts, optarg, sizeof (mntopts));
break;
case 'h':
case '?':
if (optopt)
(void) fprintf(stderr,
gettext("Invalid option '%c'\n"), optopt);
(void) fprintf(stderr, gettext("Usage: mount.zfs "
"[-sfnvh] [-o options] <dataset> <mountpoint>\n"));
return (MOUNT_USAGE);
}
}
argc -= optind;
argv += optind;
/* check that we only have two arguments */
if (argc != 2) {
if (argc == 0)
(void) fprintf(stderr, gettext("missing dataset "
"argument\n"));
else if (argc == 1)
(void) fprintf(stderr,
gettext("missing mountpoint argument\n"));
else
(void) fprintf(stderr, gettext("too many arguments\n"));
(void) fprintf(stderr, "usage: mount <dataset> <mountpoint>\n");
return (MOUNT_USAGE);
}
parse_dataset(argv[0], &pdataset);
/* canonicalize the mount point */
if (realpath(argv[1], mntpoint) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
"mounted at '%s' due to canonicalization error: %s\n"),
dataset, argv[1], strerror(errno));
return (MOUNT_SYSERR);
}
/* validate mount options and set mntflags */
error = zfs_parse_mount_options(mntopts, &mntflags, &zfsflags, sloppy,
badopt, mtabopt);
if (error) {
switch (error) {
case ENOMEM:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to a memory allocation "
"failure.\n"), dataset);
return (MOUNT_SYSERR);
case ENOENT:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to invalid option "
"'%s'.\n"), dataset, badopt);
(void) fprintf(stderr, gettext("Use the '-s' option "
"to ignore the bad mount option.\n"));
return (MOUNT_USAGE);
default:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to internal error %d.\n"),
dataset, error);
return (MOUNT_SOFTWARE);
}
}
- if (verbose)
- (void) fprintf(stdout, gettext("mount.zfs:\n"
- " dataset: \"%s\"\n mountpoint: \"%s\"\n"
- " mountflags: 0x%lx\n zfsflags: 0x%lx\n"
- " mountopts: \"%s\"\n mtabopts: \"%s\"\n"),
- dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
-
if (mntflags & MS_REMOUNT) {
nomtab = 1;
remount = 1;
}
if (zfsflags & ZS_ZFSUTIL)
zfsutil = 1;
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (MOUNT_SYSERR);
}
/* try to open the dataset to access the mount point */
if ((zhp = zfs_open(g_zfs, dataset,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT)) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
"mounted, unable to open the dataset\n"), dataset);
libzfs_fini(g_zfs);
return (MOUNT_USAGE);
}
- zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
+ if (!zfsutil || sloppy ||
+ libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
+ zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
+ }
/* treat all snapshots as legacy mount points */
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT)
(void) strlcpy(prop, ZFS_MOUNTPOINT_LEGACY, ZFS_MAXPROPLEN);
else
(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, prop,
sizeof (prop), NULL, NULL, 0, B_FALSE);
/*
* Fetch the max supported zfs version in case we get ENOTSUP
* back from the mount command, since we need the zfs handle
* to do so.
*/
zfs_version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (zfs_version == 0) {
fprintf(stderr, gettext("unable to fetch "
"ZFS version for filesystem '%s'\n"), dataset);
+ zfs_close(zhp);
+ libzfs_fini(g_zfs);
return (MOUNT_SYSERR);
}
- zfs_close(zhp);
- libzfs_fini(g_zfs);
-
/*
* Legacy mount points may only be mounted using 'mount', never using
* 'zfs mount'. However, since 'zfs mount' actually invokes 'mount'
* we differentiate the two cases using the 'zfsutil' mount option.
* This mount option should only be supplied by the 'zfs mount' util.
*
* The only exception to the above rule is '-o remount' which is
* always allowed for non-legacy datasets. This is done because when
* using zfs as your root file system both rc.sysinit/umountroot and
* systemd depend on 'mount -o remount <mountpoint>' to work.
*/
if (zfsutil && (strcmp(prop, ZFS_MOUNTPOINT_LEGACY) == 0)) {
(void) fprintf(stderr, gettext(
"filesystem '%s' cannot be mounted using 'zfs mount'.\n"
"Use 'zfs set mountpoint=%s' or 'mount -t zfs %s %s'.\n"
"See zfs(8) for more information.\n"),
dataset, mntpoint, dataset, mntpoint);
+ zfs_close(zhp);
+ libzfs_fini(g_zfs);
return (MOUNT_USAGE);
}
if (!zfsutil && !(remount || fake) &&
strcmp(prop, ZFS_MOUNTPOINT_LEGACY)) {
(void) fprintf(stderr, gettext(
"filesystem '%s' cannot be mounted using 'mount'.\n"
"Use 'zfs set mountpoint=%s' or 'zfs mount %s'.\n"
"See zfs(8) for more information.\n"),
dataset, "legacy", dataset);
+ zfs_close(zhp);
+ libzfs_fini(g_zfs);
return (MOUNT_USAGE);
}
+ if (verbose)
+ (void) fprintf(stdout, gettext("mount.zfs:\n"
+ " dataset: \"%s\"\n mountpoint: \"%s\"\n"
+ " mountflags: 0x%lx\n zfsflags: 0x%lx\n"
+ " mountopts: \"%s\"\n mtabopts: \"%s\"\n"),
+ dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
+
if (!fake) {
- error = mount(dataset, mntpoint, MNTTYPE_ZFS,
- mntflags, mntopts);
+ if (zfsutil && !sloppy &&
+ !libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
+ error = zfs_mount_at(zhp, mntopts, mntflags, mntpoint);
+ if (error) {
+ (void) fprintf(stderr, "zfs_mount_at() failed: "
+ "%s", libzfs_error_description(g_zfs));
+ zfs_close(zhp);
+ libzfs_fini(g_zfs);
+ return (MOUNT_SYSERR);
+ }
+ } else {
+ error = mount(dataset, mntpoint, MNTTYPE_ZFS,
+ mntflags, mntopts);
+ }
}
+ zfs_close(zhp);
+ libzfs_fini(g_zfs);
+
if (error) {
switch (errno) {
case ENOENT:
(void) fprintf(stderr, gettext("mount point "
"'%s' does not exist\n"), mntpoint);
return (MOUNT_SYSERR);
case EBUSY:
(void) fprintf(stderr, gettext("filesystem "
"'%s' is already mounted\n"), dataset);
return (MOUNT_BUSY);
case ENOTSUP:
if (zfs_version > ZPL_VERSION) {
(void) fprintf(stderr,
gettext("filesystem '%s' (v%d) is not "
"supported by this implementation of "
"ZFS (max v%d).\n"), dataset,
(int)zfs_version, (int)ZPL_VERSION);
} else {
(void) fprintf(stderr,
gettext("filesystem '%s' mount "
"failed for unknown reason.\n"), dataset);
}
return (MOUNT_SYSERR);
#ifdef MS_MANDLOCK
case EPERM:
if (mntflags & MS_MANDLOCK) {
(void) fprintf(stderr, gettext("filesystem "
"'%s' has the 'nbmand=on' property set, "
"this mount\noption may be disabled in "
"your kernel. Use 'zfs set nbmand=off'\n"
"to disable this option and try to "
"mount the filesystem again.\n"), dataset);
return (MOUNT_SYSERR);
}
fallthrough;
#endif
default:
(void) fprintf(stderr, gettext("filesystem "
"'%s' can not be mounted: %s\n"), dataset,
strerror(errno));
return (MOUNT_USAGE);
}
}
if (!nomtab && mtab_is_writeable()) {
error = mtab_update(dataset, mntpoint, MNTTYPE_ZFS, mtabopt);
if (error)
return (error);
}
return (MOUNT_SUCCESS);
}
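The long comment ahead of the legacy checks above captures the rule mount.zfs enforces: a dataset with mountpoint=legacy may only be mounted through mount(8), a non-legacy dataset only through 'zfs mount' (which supplies -o zfsutil), with '-o remount' permitted on non-legacy datasets so root-on-ZFS init scripts can remount the root filesystem. A hedged shell illustration of the two administrative paths; the dataset name tank/data is made up:
# Illustrative only, not part of the patch.
# Legacy dataset: managed via mount(8) or fstab.
zfs set mountpoint=legacy tank/data
mount -t zfs tank/data /mnt/data
# Non-legacy dataset: managed via the zfs utility, which calls mount.zfs
# with -o zfsutil internally.
zfs set mountpoint=/mnt/data tank/data
zfs mount tank/data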
diff --git a/sys/contrib/openzfs/cmd/vdev_id/vdev_id b/sys/contrib/openzfs/cmd/vdev_id/vdev_id
index 8cc4399a5668..7b5aab141997 100755
--- a/sys/contrib/openzfs/cmd/vdev_id/vdev_id
+++ b/sys/contrib/openzfs/cmd/vdev_id/vdev_id
@@ -1,789 +1,792 @@
#!/bin/sh
#
# vdev_id: udev helper to generate user-friendly names for JBOD disks
#
# This script parses the file /etc/zfs/vdev_id.conf to map a
# physical path in a storage topology to a channel name. The
# channel name is combined with a disk enclosure slot number to
# create an alias that reflects the physical location of the drive.
# This is particularly helpful when it comes to tasks like replacing
# failed drives. Slot numbers may also be re-mapped in case the
# default numbering is unsatisfactory. The drive aliases will be
# created as symbolic links in /dev/disk/by-vdev.
#
# The currently supported topologies are sas_direct and sas_switch.
# A multipath mode is supported in which dm-mpath devices are
# handled by examining the first-listed running component disk. In
# multipath mode the configuration file should contain a channel
# definition with the same name for each path to a given enclosure.
#
# The alias keyword provides a simple way to map already-existing
# device symlinks to more convenient names. It is suitable for
# small, static configurations or for sites that have some automated
# way to generate the mapping file.
#
#
# Some example configuration files are given below.
# #
# # Example vdev_id.conf - sas_direct.
# #
#
# multipath no
# topology sas_direct
# phys_per_port 4
# slot bay
#
# # PCI_ID HBA PORT CHANNEL NAME
# channel 85:00.0 1 A
# channel 85:00.0 0 B
# channel 86:00.0 1 C
# channel 86:00.0 0 D
#
# # Custom mapping for Channel A
#
# # Linux Mapped
# # Slot Slot Channel
# slot 1 7 A
# slot 2 10 A
# slot 3 3 A
# slot 4 6 A
#
# # Default mapping for B, C, and D
# slot 1 4
# slot 2 2
# slot 3 1
# slot 4 3
# #
# # Example vdev_id.conf - sas_switch
# #
#
# topology sas_switch
#
# # SWITCH PORT CHANNEL NAME
# channel 1 A
# channel 2 B
# channel 3 C
# channel 4 D
# #
# # Example vdev_id.conf - multipath
# #
#
# multipath yes
#
# # PCI_ID HBA PORT CHANNEL NAME
# channel 85:00.0 1 A
# channel 85:00.0 0 B
# channel 86:00.0 1 A
# channel 86:00.0 0 B
# #
# # Example vdev_id.conf - multipath / multijbod-daisychaining
# #
#
# multipath yes
# multijbod yes
#
# # PCI_ID HBA PORT CHANNEL NAME
# channel 85:00.0 1 A
# channel 85:00.0 0 B
# channel 86:00.0 1 A
# channel 86:00.0 0 B
# #
# # Example vdev_id.conf - multipath / mixed
# #
#
# multipath yes
# slot mix
#
# # PCI_ID HBA PORT CHANNEL NAME
# channel 85:00.0 3 A
# channel 85:00.0 2 B
# channel 86:00.0 3 A
# channel 86:00.0 2 B
# channel af:00.0 0 C
# channel af:00.0 1 C
# #
# # Example vdev_id.conf - alias
# #
#
# # by-vdev
# # name fully qualified or base name of device link
# alias d1 /dev/disk/by-id/wwn-0x5000c5002de3b9ca
# alias d2 wwn-0x5000c5002def789e
PATH=/bin:/sbin:/usr/bin:/usr/sbin
CONFIG=/etc/zfs/vdev_id.conf
PHYS_PER_PORT=
DEV=
TOPOLOGY=
BAY=
ENCL_ID=""
UNIQ_ENCL_ID=""
usage() {
cat << EOF
Usage: vdev_id [-h]
vdev_id <-d device> [-c config_file] [-p phys_per_port]
[-g sas_direct|sas_switch|scsi] [-m]
-c specify name of an alternative config file [default=$CONFIG]
-d specify basename of device (e.g. sda)
-e Create enclosure device symlinks only (/dev/by-enclosure)
-g Storage network topology [default="$TOPOLOGY"]
-m Run in multipath mode
-j Run in multijbod mode
-p number of phy's per switch port [default=$PHYS_PER_PORT]
-h show this summary
EOF
exit 1
# exit with error to avoid processing usage message by a udev rule
}
map_slot() {
LINUX_SLOT=$1
CHANNEL=$2
MAPPED_SLOT=$(awk -v linux_slot="$LINUX_SLOT" -v channel="$CHANNEL" \
'$1 == "slot" && $2 == linux_slot && \
($4 ~ "^"channel"$" || $4 ~ /^$/) { print $3; exit}' $CONFIG)
if [ -z "$MAPPED_SLOT" ] ; then
MAPPED_SLOT=$LINUX_SLOT
fi
printf "%d" "${MAPPED_SLOT}"
}
map_channel() {
MAPPED_CHAN=
PCI_ID=$1
PORT=$2
case $TOPOLOGY in
"sas_switch")
MAPPED_CHAN=$(awk -v port="$PORT" \
'$1 == "channel" && $2 == port \
{ print $3; exit }' $CONFIG)
;;
"sas_direct"|"scsi")
MAPPED_CHAN=$(awk -v pciID="$PCI_ID" -v port="$PORT" \
'$1 == "channel" && $2 == pciID && $3 == port \
{print $4}' $CONFIG)
;;
esac
printf "%s" "${MAPPED_CHAN}"
}
get_encl_id() {
set -- $(echo $1)
count=$#
i=1
while [ $i -le $count ] ; do
d=$(eval echo '$'{$i})
id=$(cat "/sys/class/enclosure/${d}/id")
ENCL_ID="${ENCL_ID} $id"
i=$((i + 1))
done
}
get_uniq_encl_id() {
for uuid in ${ENCL_ID}; do
found=0
for count in ${UNIQ_ENCL_ID}; do
if [ $count = $uuid ]; then
found=1
break
fi
done
if [ $found -eq 0 ]; then
UNIQ_ENCL_ID="${UNIQ_ENCL_ID} $uuid"
fi
done
}
# map_jbod explainer: The bsg driver knows the difference between a SAS
# expander and fanout expander. Use hostX instance along with top-level
# (whole enclosure) expander instances in /sys/class/enclosure and
# matching a field in an array of expanders, using the index of the
# matched array field as the enclosure instance, thereby making jbod IDs
# dynamic. Avoids reliance on high overhead userspace commands like
# multipath and lsscsi and instead uses existing sysfs data. $HOSTCHAN
# variable derived from devpath gymnastics in sas_handler() function.
map_jbod() {
DEVEXP=$(ls -l "/sys/block/$DEV/device/" | grep enclos | awk -F/ '{print $(NF-1) }')
DEV=$1
# Use "set --" to create index values (Arrays)
set -- $(ls -l /sys/class/enclosure | grep -v "^total" | awk '{print $9}')
# Get count of total elements
JBOD_COUNT=$#
JBOD_ITEM=$*
# Build JBODs (enclosure) id from sys/class/enclosure/<dev>/id
get_encl_id "$JBOD_ITEM"
# Different expander instances exist for each path.
# Filter out and keep only unique id.
get_uniq_encl_id
# Identify final 'mapped jbod'
j=0
for count in ${UNIQ_ENCL_ID}; do
i=1
j=$((j + 1))
while [ $i -le $JBOD_COUNT ] ; do
d=$(eval echo '$'{$i})
id=$(cat "/sys/class/enclosure/${d}/id")
if [ "$d" = "$DEVEXP" ] && [ $id = $count ] ; then
MAPPED_JBOD=$j
break
fi
i=$((i + 1))
done
done
printf "%d" "${MAPPED_JBOD}"
}
sas_handler() {
if [ -z "$PHYS_PER_PORT" ] ; then
PHYS_PER_PORT=$(awk '$1 == "phys_per_port" \
{print $2; exit}' $CONFIG)
fi
PHYS_PER_PORT=${PHYS_PER_PORT:-4}
if ! echo "$PHYS_PER_PORT" | grep -q -E '^[0-9]+$' ; then
echo "Error: phys_per_port value $PHYS_PER_PORT is non-numeric"
exit 1
fi
if [ -z "$MULTIPATH_MODE" ] ; then
MULTIPATH_MODE=$(awk '$1 == "multipath" \
{print $2; exit}' $CONFIG)
fi
if [ -z "$MULTIJBOD_MODE" ] ; then
MULTIJBOD_MODE=$(awk '$1 == "multijbod" \
{print $2; exit}' $CONFIG)
fi
# Use first running component device if we're handling a dm-mpath device
if [ "$MULTIPATH_MODE" = "yes" ] ; then
# If udev didn't tell us the UUID via DM_NAME, check /dev/mapper
if [ -z "$DM_NAME" ] ; then
DM_NAME=$(ls -l --full-time /dev/mapper |
grep "$DEV"$ | awk '{print $9}')
fi
# For raw disks udev exports DEVTYPE=partition when
# handling partitions, and the rules can be written to
# take advantage of this to append a -part suffix. For
# dm devices we get DEVTYPE=disk even for partitions so
# we have to append the -part suffix directly in the
# helper.
if [ "$DEVTYPE" != "partition" ] ; then
# Match p[number], remove the 'p' and prepend "-part"
PART=$(echo "$DM_NAME" |
awk 'match($0,/p[0-9]+$/) {print "-part"substr($0,RSTART+1,RLENGTH-1)}')
fi
# Strip off partition information.
DM_NAME=$(echo "$DM_NAME" | sed 's/p[0-9][0-9]*$//')
if [ -z "$DM_NAME" ] ; then
return
fi
# Utilize DM device name to gather subordinate block devices
# using sysfs to avoid userspace utilities
# If our DEVNAME is something like /dev/dm-177, then we may be
# able to get our DMDEV from it.
DMDEV=$(echo $DEVNAME | sed 's;/dev/;;g')
if [ ! -e /sys/block/$DMDEV/slaves/* ] ; then
# It's not there, try looking in /dev/mapper
DMDEV=$(ls -l --full-time /dev/mapper | grep $DM_NAME |
awk '{gsub("../", " "); print $NF}')
fi
# Use sysfs pointers in /sys/block/dm-X/slaves because using
# userspace tools creates lots of overhead and should be avoided
# whenever possible. Use awk to isolate lowest instance of
# sd device member in dm device group regardless of string
# length.
DEV=$(ls "/sys/block/$DMDEV/slaves" | awk '
{ len=sprintf ("%20s",length($0)); gsub(/ /,0,str); a[NR]=len "_" $0; }
END {
asort(a)
print substr(a[1],22)
}')
if [ -z "$DEV" ] ; then
return
fi
fi
if echo "$DEV" | grep -q ^/devices/ ; then
sys_path=$DEV
else
sys_path=$(udevadm info -q path -p "/sys/block/$DEV" 2>/dev/null)
fi
# Use positional parameters as an ad-hoc array
set -- $(echo "$sys_path" | tr / ' ')
num_dirs=$#
scsi_host_dir="/sys"
# Get path up to /sys/.../hostX
i=1
while [ $i -le "$num_dirs" ] ; do
d=$(eval echo '$'{$i})
scsi_host_dir="$scsi_host_dir/$d"
echo "$d" | grep -q -E '^host[0-9]+$' && break
i=$((i + 1))
done
# Let's grab the SAS host channel number and save it for JBOD sorting later
HOSTCHAN=$(echo "$d" | awk -F/ '{ gsub("host","",$NF); print $NF}')
if [ $i = "$num_dirs" ] ; then
return
fi
PCI_ID=$(eval echo '$'{$((i -1))} | awk -F: '{print $2":"$3}')
# In sas_switch mode, the directory four levels beneath
# /sys/.../hostX contains symlinks to phy devices that reveal
# the switch port number. In sas_direct mode, the phy links one
# directory down reveal the HBA port.
port_dir=$scsi_host_dir
case $TOPOLOGY in
"sas_switch") j=$((i + 4)) ;;
"sas_direct") j=$((i + 1)) ;;
esac
i=$((i + 1))
while [ $i -le $j ] ; do
port_dir="$port_dir/$(eval echo '$'{$i})"
i=$((i + 1))
done
PHY=$(ls -vd "$port_dir"/phy* 2>/dev/null | head -1 | awk -F: '{print $NF}')
if [ -z "$PHY" ] ; then
PHY=0
fi
PORT=$((PHY / PHYS_PER_PORT))
# Look in /sys/.../sas_device/end_device-X for the bay_identifier
# attribute.
end_device_dir=$port_dir
while [ $i -lt "$num_dirs" ] ; do
d=$(eval echo '$'{$i})
end_device_dir="$end_device_dir/$d"
if echo "$d" | grep -q '^end_device' ; then
end_device_dir="$end_device_dir/sas_device/$d"
break
fi
i=$((i + 1))
done
# The 'mix' slot type covers environments where dm-multipath devices
# include end devices connected either through SAS expanders or directly
# to a SAS HBA: for example, pool devices housed in a SAS JBOD alongside
# spare or log devices attached directly to a server backplane with no
# expander in the I/O path.
SLOT=
case $BAY in
"bay")
SLOT=$(cat "$end_device_dir/bay_identifier" 2>/dev/null)
;;
"mix")
if [ $(cat "$end_device_dir/bay_identifier" 2>/dev/null) ] ; then
SLOT=$(cat "$end_device_dir/bay_identifier" 2>/dev/null)
else
SLOT=$(cat "$end_device_dir/phy_identifier" 2>/dev/null)
fi
;;
"phy")
SLOT=$(cat "$end_device_dir/phy_identifier" 2>/dev/null)
;;
"port")
d=$(eval echo '$'{$i})
SLOT=$(echo "$d" | sed -e 's/^.*://')
;;
"id")
i=$((i + 1))
d=$(eval echo '$'{$i})
SLOT=$(echo "$d" | sed -e 's/^.*://')
;;
"lun")
i=$((i + 2))
d=$(eval echo '$'{$i})
SLOT=$(echo "$d" | sed -e 's/^.*://')
;;
"ses")
# look for this SAS path in all SCSI Enclosure Services
# (SES) enclosures
sas_address=$(cat "$end_device_dir/sas_address" 2>/dev/null)
enclosures=$(lsscsi -g | \
sed -n -e '/enclosu/s/^.* \([^ ][^ ]*\) *$/\1/p')
for enclosure in $enclosures; do
set -- $(sg_ses -p aes "$enclosure" | \
awk "/device slot number:/{slot=\$12} \
/SAS address: $sas_address/\
{print slot}")
SLOT=$1
if [ -n "$SLOT" ] ; then
break
fi
done
;;
esac
if [ -z "$SLOT" ] ; then
return
fi
if [ "$MULTIJBOD_MODE" = "yes" ] ; then
CHAN=$(map_channel "$PCI_ID" "$PORT")
SLOT=$(map_slot "$SLOT" "$CHAN")
JBOD=$(map_jbod "$DEV")
if [ -z "$CHAN" ] ; then
return
fi
echo "${CHAN}"-"${JBOD}"-"${SLOT}${PART}"
else
CHAN=$(map_channel "$PCI_ID" "$PORT")
SLOT=$(map_slot "$SLOT" "$CHAN")
if [ -z "$CHAN" ] ; then
return
fi
echo "${CHAN}${SLOT}${PART}"
fi
}
scsi_handler() {
if [ -z "$FIRST_BAY_NUMBER" ] ; then
FIRST_BAY_NUMBER=$(awk '$1 == "first_bay_number" \
{print $2; exit}' $CONFIG)
fi
FIRST_BAY_NUMBER=${FIRST_BAY_NUMBER:-0}
if [ -z "$PHYS_PER_PORT" ] ; then
PHYS_PER_PORT=$(awk '$1 == "phys_per_port" \
{print $2; exit}' $CONFIG)
fi
PHYS_PER_PORT=${PHYS_PER_PORT:-4}
if ! echo "$PHYS_PER_PORT" | grep -q -E '^[0-9]+$' ; then
echo "Error: phys_per_port value $PHYS_PER_PORT is non-numeric"
exit 1
fi
if [ -z "$MULTIPATH_MODE" ] ; then
MULTIPATH_MODE=$(awk '$1 == "multipath" \
{print $2; exit}' $CONFIG)
fi
# Use first running component device if we're handling a dm-mpath device
if [ "$MULTIPATH_MODE" = "yes" ] ; then
# If udev didn't tell us the UUID via DM_NAME, check /dev/mapper
if [ -z "$DM_NAME" ] ; then
DM_NAME=$(ls -l --full-time /dev/mapper |
grep "$DEV"$ | awk '{print $9}')
fi
# For raw disks udev exports DEVTYPE=partition when
# handling partitions, and the rules can be written to
# take advantage of this to append a -part suffix. For
# dm devices we get DEVTYPE=disk even for partitions so
# we have to append the -part suffix directly in the
# helper.
if [ "$DEVTYPE" != "partition" ] ; then
# Match p[number], remove the 'p' and prepend "-part"
PART=$(echo "$DM_NAME" |
awk 'match($0,/p[0-9]+$/) {print "-part"substr($0,RSTART+1,RLENGTH-1)}')
fi
# Strip off partition information.
DM_NAME=$(echo "$DM_NAME" | sed 's/p[0-9][0-9]*$//')
if [ -z "$DM_NAME" ] ; then
return
fi
# Get the raw scsi device name from multipath -ll. Strip off
# leading pipe symbols to make field numbering consistent.
DEV=$(multipath -ll "$DM_NAME" |
awk '/running/{gsub("^[|]"," "); print $3 ; exit}')
if [ -z "$DEV" ] ; then
return
fi
fi
if echo "$DEV" | grep -q ^/devices/ ; then
sys_path=$DEV
else
sys_path=$(udevadm info -q path -p "/sys/block/$DEV" 2>/dev/null)
fi
# expect sys_path like this, for example:
# /devices/pci0000:00/0000:00:0b.0/0000:09:00.0/0000:0a:05.0/0000:0c:00.0/host3/target3:1:0/3:1:0:21/block/sdv
# Use positional parameters as an ad-hoc array
set -- $(echo "$sys_path" | tr / ' ')
num_dirs=$#
scsi_host_dir="/sys"
# Get path up to /sys/.../hostX
i=1
while [ $i -le "$num_dirs" ] ; do
d=$(eval echo '$'{$i})
scsi_host_dir="$scsi_host_dir/$d"
echo "$d" | grep -q -E '^host[0-9]+$' && break
i=$((i + 1))
done
if [ $i = "$num_dirs" ] ; then
return
fi
PCI_ID=$(eval echo '$'{$((i -1))} | awk -F: '{print $2":"$3}')
# In scsi mode, the directory two levels beneath
# /sys/.../hostX reveals the port and slot.
port_dir=$scsi_host_dir
j=$((i + 2))
i=$((i + 1))
while [ $i -le $j ] ; do
port_dir="$port_dir/$(eval echo '$'{$i})"
i=$((i + 1))
done
set -- $(echo "$port_dir" | sed -e 's/^.*:\([^:]*\):\([^:]*\)$/\1 \2/')
PORT=$1
SLOT=$(($2 + FIRST_BAY_NUMBER))
if [ -z "$SLOT" ] ; then
return
fi
CHAN=$(map_channel "$PCI_ID" "$PORT")
SLOT=$(map_slot "$SLOT" "$CHAN")
if [ -z "$CHAN" ] ; then
return
fi
echo "${CHAN}${SLOT}${PART}"
}
# Figure out the name for the enclosure symlink
enclosure_handler () {
# We get all the info we need from udev's DEVPATH variable:
#
# DEVPATH=/sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/subsystem/devices/0:0:0:0/scsi_generic/sg0
# Get the enclosure ID ("0:0:0:0")
- ENC=$(basename $(readlink -m "/sys/$DEVPATH/../.."))
+ ENC="${DEVPATH%/*}"
+ ENC="${ENC%/*}"
+ ENC="${ENC##*/}"
if [ ! -d "/sys/class/enclosure/$ENC" ] ; then
# Not an enclosure, bail out
return
fi
# Get the long sysfs device path to our enclosure. Looks like:
# /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0/ ... /enclosure/0:0:0:0
ENC_DEVICE=$(readlink "/sys/class/enclosure/$ENC")
# Grab the full path to the hosts port dir:
# /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0
PORT_DIR=$(echo "$ENC_DEVICE" | grep -Eo '.+host[0-9]+/port-[0-9]+:[0-9]+')
# Get the port number
PORT_ID=$(echo "$PORT_DIR" | grep -Eo "[0-9]+$")
# The PCI directory is two directories up from the port directory
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0
- PCI_ID_LONG=$(basename $(readlink -m "/sys/$PORT_DIR/../.."))
+ PCI_ID_LONG="$(readlink -m "/sys/$PORT_DIR/../..")"
+ PCI_ID_LONG="${PCI_ID_LONG##*/}"
# Strip down the PCI address from 0000:05:00.0 to 05:00.0
- PCI_ID=$(echo "$PCI_ID_LONG" | sed -r 's/^[0-9]+://g')
+ PCI_ID="${PCI_ID_LONG#[0-9]*:}"
# Name our device according to vdev_id.conf (like "L0" or "U1").
NAME=$(awk "/channel/{if (\$1 == \"channel\" && \$2 == \"$PCI_ID\" && \
\$3 == \"$PORT_ID\") {print \$4\$3}}" $CONFIG)
echo "${NAME}"
}
alias_handler () {
# Special handling is needed to correctly append a -part suffix
# to partitions of device mapper devices. The DEVTYPE attribute
# is normally set to "disk" instead of "partition" in this case,
# so the udev rules won't handle that for us as they do for
# "plain" block devices.
#
# For example, we may have the following links for a device and its
# partitions,
#
# /dev/disk/by-id/dm-name-isw_dibgbfcije_ARRAY0 -> ../../dm-0
# /dev/disk/by-id/dm-name-isw_dibgbfcije_ARRAY0p1 -> ../../dm-1
# /dev/disk/by-id/dm-name-isw_dibgbfcije_ARRAY0p2 -> ../../dm-3
#
# and the following alias in vdev_id.conf.
#
# alias A0 dm-name-isw_dibgbfcije_ARRAY0
#
# The desired outcome is for the following links to be created
# without having explicitly defined aliases for the partitions.
#
# /dev/disk/by-vdev/A0 -> ../../dm-0
# /dev/disk/by-vdev/A0-part1 -> ../../dm-1
# /dev/disk/by-vdev/A0-part2 -> ../../dm-3
#
# Warning: The following grep pattern will misidentify whole-disk
# devices whose names end with 'p' followed by a string of
# digits as partitions, causing alias creation to fail. This
# ambiguity seems unavoidable, so devices using this facility
# must not use such names.
DM_PART=
if echo "$DM_NAME" | grep -q -E 'p[0-9][0-9]*$' ; then
if [ "$DEVTYPE" != "partition" ] ; then
# Match p[number], remove the 'p' and prepend "-part"
DM_PART=$(echo "$DM_NAME" |
awk 'match($0,/p[0-9]+$/) {print "-part"substr($0,RSTART+1,RLENGTH-1)}')
fi
fi
# DEVLINKS attribute must have been populated by already-run udev rules.
for link in $DEVLINKS ; do
# Remove partition information to match key of top-level device.
if [ -n "$DM_PART" ] ; then
link=$(echo "$link" | sed 's/p[0-9][0-9]*$//')
fi
# Check both the fully qualified and the base name of link.
- for l in $link $(basename "$link") ; do
+ for l in $link ${link##*/} ; do
if [ ! -z "$l" ]; then
alias=$(awk -v var="$l" '($1 == "alias") && \
($3 == var) \
{ print $2; exit }' $CONFIG)
if [ -n "$alias" ] ; then
echo "${alias}${DM_PART}"
return
fi
fi
done
done
}
# main
while getopts 'c:d:eg:jmp:h' OPTION; do
case ${OPTION} in
c)
CONFIG=${OPTARG}
;;
d)
DEV=${OPTARG}
;;
e)
# When udev sees a scsi_generic device, it calls this script with -e to
# create the enclosure device symlinks only. We also need
# "enclosure_symlinks yes" set in vdev_id.config to actually create the
# symlink.
ENCLOSURE_MODE=$(awk '{if ($1 == "enclosure_symlinks") \
print $2}' "$CONFIG")
if [ "$ENCLOSURE_MODE" != "yes" ] ; then
exit 0
fi
;;
g)
TOPOLOGY=$OPTARG
;;
p)
PHYS_PER_PORT=${OPTARG}
;;
j)
MULTIJBOD_MODE=yes
;;
m)
MULTIPATH_MODE=yes
;;
h)
usage
;;
esac
done
if [ ! -r "$CONFIG" ] ; then
echo "Error: Config file \"$CONFIG\" not found"
exit 1
fi
if [ -z "$DEV" ] && [ -z "$ENCLOSURE_MODE" ] ; then
echo "Error: missing required option -d"
exit 1
fi
if [ -z "$TOPOLOGY" ] ; then
TOPOLOGY=$(awk '($1 == "topology") {print $2; exit}' "$CONFIG")
fi
if [ -z "$BAY" ] ; then
BAY=$(awk '($1 == "slot") {print $2; exit}' "$CONFIG")
fi
TOPOLOGY=${TOPOLOGY:-sas_direct}
# Should we create /dev/by-enclosure symlinks?
if [ "$ENCLOSURE_MODE" = "yes" ] && [ "$TOPOLOGY" = "sas_direct" ] ; then
ID_ENCLOSURE=$(enclosure_handler)
if [ -z "$ID_ENCLOSURE" ] ; then
exit 0
fi
# Just create the symlinks to the enclosure devices and then exit.
ENCLOSURE_PREFIX=$(awk '/enclosure_symlinks_prefix/{print $2}' "$CONFIG")
if [ -z "$ENCLOSURE_PREFIX" ] ; then
ENCLOSURE_PREFIX="enc"
fi
echo "ID_ENCLOSURE=$ID_ENCLOSURE"
echo "ID_ENCLOSURE_PATH=by-enclosure/$ENCLOSURE_PREFIX-$ID_ENCLOSURE"
exit 0
fi
# First check if an alias was defined for this device.
ID_VDEV=$(alias_handler)
if [ -z "$ID_VDEV" ] ; then
BAY=${BAY:-bay}
case $TOPOLOGY in
sas_direct|sas_switch)
ID_VDEV=$(sas_handler)
;;
scsi)
ID_VDEV=$(scsi_handler)
;;
*)
echo "Error: unknown topology $TOPOLOGY"
exit 1
;;
esac
fi
if [ -n "$ID_VDEV" ] ; then
echo "ID_VDEV=${ID_VDEV}"
echo "ID_VDEV_PATH=disk/by-vdev/${ID_VDEV}"
fi
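The configuration keywords parsed throughout the script (topology, slot, channel, alias, multipath, multijbod, phys_per_port, enclosure_symlinks) are the same ones documented in its header comments. A hedged sketch of a minimal sas_direct setup and a manual invocation outside udev; the PCI IDs and the printed alias are invented for illustration:
# Illustrative only, not part of the patch.
cat > /etc/zfs/vdev_id.conf <<'EOF'
topology sas_direct
slot bay
# PCI_ID HBA PORT CHANNEL NAME
channel 85:00.0 1 A
channel 85:00.0 0 B
EOF
vdev_id -d sda
# Expected output shape (actual values depend on the hardware):
# ID_VDEV=A1
# ID_VDEV_PATH=disk/by-vdev/A1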
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
index 1c4cc885b5e5..f4063bea7378 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
@@ -1,564 +1,565 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
*/
/*
* The ZFS retire agent is responsible for managing hot spares across all pools.
* When we see a device fault or a device removal, we try to open the associated
* pool and look for any hot spares. We iterate over any available hot spares
* and attempt a 'zpool replace' for each one.
*
* For vdevs diagnosed as faulty, the agent is also responsible for proactively
* marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
*/
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzfs.h>
#include <string.h>
+#include <libgen.h>
#include "zfs_agents.h"
#include "fmd_api.h"
typedef struct zfs_retire_repaired {
struct zfs_retire_repaired *zrr_next;
uint64_t zrr_pool;
uint64_t zrr_vdev;
} zfs_retire_repaired_t;
typedef struct zfs_retire_data {
libzfs_handle_t *zrd_hdl;
zfs_retire_repaired_t *zrd_repaired;
} zfs_retire_data_t;
static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
zfs_retire_repaired_t *zrp;
while ((zrp = zdp->zrd_repaired) != NULL) {
zdp->zrd_repaired = zrp->zrr_next;
fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
}
}
/*
* Find a pool with a matching GUID.
*/
typedef struct find_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
nvlist_t *cb_vdev;
} find_cbdata_t;
static int
find_pool(zpool_handle_t *zhp, void *data)
{
find_cbdata_t *cbp = data;
if (cbp->cb_guid ==
zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* Find a vdev within a tree with a matching GUID.
*/
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
nvlist_t *ret;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
guid == search_guid) {
fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
"matched vdev %llu", guid);
return (nv);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
return (NULL);
}
/*
* Given a (pool, vdev) GUID pair, find the matching pool and vdev.
*/
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
nvlist_t **vdevp)
{
find_cbdata_t cb;
zpool_handle_t *zhp;
nvlist_t *config, *nvroot;
/*
* Find the corresponding pool and make sure the vdev still exists.
*/
cb.cb_guid = pool_guid;
if (zpool_iter(zhdl, find_pool, &cb) != 1)
return (NULL);
zhp = cb.cb_zhp;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) != 0) {
zpool_close(zhp);
return (NULL);
}
if (vdev_guid != 0) {
if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
zpool_close(zhp);
return (NULL);
}
}
return (zhp);
}
/*
* Given a vdev, attempt to replace it with every known spare until one
* succeeds or we run out of devices to try.
* Return whether we were successful or not in replacing the device.
*/
static boolean_t
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
nvlist_t *config, *nvroot, *replacement;
nvlist_t **spares;
uint_t s, nspares;
char *dev_name;
zprop_source_t source;
int ashift;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) != 0)
return (B_FALSE);
/*
* Find out if there are any hot spares available in the pool.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) != 0)
return (B_FALSE);
/*
* lookup "ashift" pool property, we may need it for the replacement
*/
ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT);
dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
/*
* Try to replace each spare, ending when we successfully
* replace it.
*/
for (s = 0; s < nspares; s++) {
boolean_t rebuild = B_FALSE;
char *spare_name, *type;
if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
&spare_name) != 0)
continue;
/* prefer sequential resilvering for distributed spares */
if ((nvlist_lookup_string(spares[s], ZPOOL_CONFIG_TYPE,
&type) == 0) && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
rebuild = B_TRUE;
/* if set, add the "ashift" pool property to the spare nvlist */
if (source != ZPROP_SRC_DEFAULT)
(void) nvlist_add_uint64(spares[s],
ZPOOL_CONFIG_ASHIFT, ashift);
(void) nvlist_add_nvlist_array(replacement,
ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
dev_name, basename(spare_name));
if (zpool_vdev_attach(zhp, dev_name, spare_name,
replacement, B_TRUE, rebuild) == 0) {
free(dev_name);
nvlist_free(replacement);
return (B_TRUE);
}
}
free(dev_name);
nvlist_free(replacement);
return (B_FALSE);
}
/*
* Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
* ASRU is now usable. ZFS has found the device to be present and
* functioning.
*/
/*ARGSUSED*/
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
zfs_retire_repaired_t *zrp;
uint64_t pool_guid, vdev_guid;
if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
&pool_guid) != 0 || nvlist_lookup_uint64(nvl,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
return;
/*
* Before checking the state of the ASRU, go through and see if we've
* already made an attempt to repair this ASRU. This list is cleared
* whenever we receive any kind of list event, and is designed to
* prevent us from generating a feedback loop when we attempt repairs
* against a faulted pool. The problem is that checking the unusable
* state of the ASRU can involve opening the pool, which can post
* statechange events but otherwise leave the pool in the faulted
* state. This list allows us to detect when a statechange event is
* due to our own request.
*/
for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
if (zrp->zrr_pool == pool_guid &&
zrp->zrr_vdev == vdev_guid)
return;
}
zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
zrp->zrr_next = zdp->zrd_repaired;
zrp->zrr_pool = pool_guid;
zrp->zrr_vdev = vdev_guid;
zdp->zrd_repaired = zrp;
fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
vdev_guid, pool_guid);
}
/*ARGSUSED*/
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
const char *class)
{
uint64_t pool_guid, vdev_guid;
zpool_handle_t *zhp;
nvlist_t *resource, *fault;
nvlist_t **faults;
uint_t f, nfaults;
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
libzfs_handle_t *zhdl = zdp->zrd_hdl;
boolean_t fault_device, degrade_device;
boolean_t is_repair;
char *scheme;
nvlist_t *vdev = NULL;
char *uuid;
int repair_done = 0;
boolean_t retire;
boolean_t is_disk;
vdev_aux_t aux;
uint64_t state = 0;
fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);
nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, &state);
/*
* If this is a resource notifying us of device removal then simply
* check for an available spare and continue unless the device is a
* l2arc vdev, in which case we just offline it.
*/
if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
(strcmp(class, "resource.fs.zfs.statechange") == 0 &&
(state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
char *devtype;
char *devname;
if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
&vdev_guid) != 0)
return;
if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
&vdev)) == NULL)
return;
devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
/* Can't replace l2arc with a spare: offline the device */
if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
&devtype) == 0 && strcmp(devtype, VDEV_TYPE_L2CACHE) == 0) {
fmd_hdl_debug(hdl, "zpool_vdev_offline '%s'", devname);
zpool_vdev_offline(zhp, devname, B_TRUE);
} else if (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
replace_with_spare(hdl, zhp, vdev) == B_FALSE) {
/* Could not handle with spare */
fmd_hdl_debug(hdl, "no spare for '%s'", devname);
}
free(devname);
zpool_close(zhp);
return;
}
if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
return;
/*
* Note: on Linux statechange events are more than just
* healthy ones so we need to confirm the actual state value.
*/
if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
state == VDEV_STATE_HEALTHY) {
zfs_vdev_repair(hdl, nvl);
return;
}
if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
zfs_vdev_repair(hdl, nvl);
return;
}
zfs_retire_clear_data(hdl, zdp);
if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
is_repair = B_TRUE;
else
is_repair = B_FALSE;
/*
* We subscribe to zfs faults as well as all repair events.
*/
if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
&faults, &nfaults) != 0)
return;
for (f = 0; f < nfaults; f++) {
fault = faults[f];
fault_device = B_FALSE;
degrade_device = B_FALSE;
is_disk = B_FALSE;
if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
&retire) == 0 && retire == 0)
continue;
/*
* While we subscribe to fault.fs.zfs.*, we only take action
* for faults targeting a specific vdev (open failure or SERD
* failure). We also subscribe to fault.io.* events, so that
* faulty disks will be faulted in the ZFS configuration.
*/
if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
fault_device = B_TRUE;
} else if (fmd_nvl_class_match(hdl, fault,
"fault.fs.zfs.vdev.checksum")) {
degrade_device = B_TRUE;
} else if (fmd_nvl_class_match(hdl, fault,
"fault.fs.zfs.device")) {
fault_device = B_FALSE;
} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
is_disk = B_TRUE;
fault_device = B_TRUE;
} else {
continue;
}
if (is_disk) {
continue;
} else {
/*
* This is a ZFS fault. Lookup the resource, and
* attempt to find the matching vdev.
*/
if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
&resource) != 0 ||
nvlist_lookup_string(resource, FM_FMRI_SCHEME,
&scheme) != 0)
continue;
if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
continue;
if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
&pool_guid) != 0)
continue;
if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
&vdev_guid) != 0) {
if (is_repair)
vdev_guid = 0;
else
continue;
}
if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
&vdev)) == NULL)
continue;
aux = VDEV_AUX_ERR_EXCEEDED;
}
if (vdev_guid == 0) {
/*
* For pool-level repair events, clear the entire pool.
*/
fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
zpool_get_name(zhp));
(void) zpool_clear(zhp, NULL, NULL);
zpool_close(zhp);
continue;
}
/*
* If this is a repair event, then mark the vdev as repaired and
* continue.
*/
if (is_repair) {
repair_done = 1;
fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
zpool_get_name(zhp), vdev_guid);
(void) zpool_vdev_clear(zhp, vdev_guid);
zpool_close(zhp);
continue;
}
/*
* Actively fault the device if needed.
*/
if (fault_device)
(void) zpool_vdev_fault(zhp, vdev_guid, aux);
if (degrade_device)
(void) zpool_vdev_degrade(zhp, vdev_guid, aux);
if (fault_device || degrade_device)
fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
fault_device ? "fault" : "degrade", vdev_guid,
zpool_get_name(zhp));
/*
* Attempt to substitute a hot spare.
*/
(void) replace_with_spare(hdl, zhp, vdev);
zpool_close(zhp);
}
if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
fmd_case_uuresolved(hdl, uuid);
}
static const fmd_hdl_ops_t fmd_ops = {
zfs_retire_recv, /* fmdo_recv */
NULL, /* fmdo_timeout */
NULL, /* fmdo_close */
NULL, /* fmdo_stats */
NULL, /* fmdo_gc */
};
static const fmd_prop_t fmd_props[] = {
{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
{ NULL, 0, NULL }
};
static const fmd_hdl_info_t fmd_info = {
"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};
void
_zfs_retire_init(fmd_hdl_t *hdl)
{
zfs_retire_data_t *zdp;
libzfs_handle_t *zhdl;
if ((zhdl = libzfs_init()) == NULL)
return;
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
libzfs_fini(zhdl);
return;
}
zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
zdp->zrd_hdl = zhdl;
fmd_hdl_setspecific(hdl, zdp);
}
void
_zfs_retire_fini(fmd_hdl_t *hdl)
{
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
if (zdp != NULL) {
zfs_retire_clear_data(hdl, zdp);
libzfs_fini(zdp->zrd_hdl);
fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
}
}
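replace_with_spare() above iterates over the pool's spare list and calls zpool_vdev_attach() for each candidate, preferring a sequential (rebuild) resilver when the spare is a dRAID distributed spare and propagating the pool's ashift when it is explicitly set. A hedged command-line sketch of the behaviour the agent automates; device names are placeholders:
# Illustrative only, not part of the patch.
zpool create tank mirror /dev/sdb /dev/sdc spare /dev/sdd
# When /dev/sdc is faulted or removed and spare_on_remove is enabled,
# the retire agent performs roughly the equivalent of:
zpool replace tank /dev/sdc /dev/sdd
zpool status tank   # shows the spare active until the original device is repaired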
diff --git a/sys/contrib/openzfs/cmd/zed/zed.c b/sys/contrib/openzfs/cmd/zed/zed.c
index 0aa03fded468..e45176c00bf2 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.c
+++ b/sys/contrib/openzfs/cmd/zed/zed.c
@@ -1,308 +1,308 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include "zed.h"
#include "zed_conf.h"
#include "zed_event.h"
#include "zed_file.h"
#include "zed_log.h"
static volatile sig_atomic_t _got_exit = 0;
static volatile sig_atomic_t _got_hup = 0;
/*
* Signal handler for SIGINT & SIGTERM.
*/
static void
_exit_handler(int signum)
{
_got_exit = 1;
}
/*
* Signal handler for SIGHUP.
*/
static void
_hup_handler(int signum)
{
_got_hup = 1;
}
/*
* Register signal handlers.
*/
static void
_setup_sig_handlers(void)
{
struct sigaction sa;
if (sigemptyset(&sa.sa_mask) < 0)
zed_log_die("Failed to initialize sigset");
sa.sa_flags = SA_RESTART;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGPIPE, &sa, NULL) < 0)
zed_log_die("Failed to ignore SIGPIPE");
sa.sa_handler = _exit_handler;
if (sigaction(SIGINT, &sa, NULL) < 0)
zed_log_die("Failed to register SIGINT handler");
if (sigaction(SIGTERM, &sa, NULL) < 0)
zed_log_die("Failed to register SIGTERM handler");
sa.sa_handler = _hup_handler;
if (sigaction(SIGHUP, &sa, NULL) < 0)
zed_log_die("Failed to register SIGHUP handler");
(void) sigaddset(&sa.sa_mask, SIGCHLD);
if (pthread_sigmask(SIG_BLOCK, &sa.sa_mask, NULL) < 0)
zed_log_die("Failed to block SIGCHLD");
}
/*
* Lock all current and future pages in the virtual memory address space.
* Access to locked pages will never be delayed by a page fault.
*
* EAGAIN is tested up to max_tries in case this is a transient error.
*
* Note that memory locks are not inherited by a child created via fork()
* and are automatically removed during an execve(). As such, this must
* be called after the daemon fork()s (when running in the background).
*/
static void
_lock_memory(void)
{
#if HAVE_MLOCKALL
int i = 0;
const int max_tries = 10;
for (i = 0; i < max_tries; i++) {
if (mlockall(MCL_CURRENT | MCL_FUTURE) == 0) {
zed_log_msg(LOG_INFO, "Locked all pages in memory");
return;
}
if (errno != EAGAIN)
break;
}
zed_log_die("Failed to lock memory pages: %s", strerror(errno));
#else /* HAVE_MLOCKALL */
zed_log_die("Failed to lock memory pages: mlockall() not supported");
#endif /* HAVE_MLOCKALL */
}
/*
* Start daemonization of the process including the double fork().
*
* The parent process will block here until _finish_daemonize() is called
* (in the grandchild process), at which point the parent process will exit.
* This prevents the parent process from exiting until initialization is
* complete.
*/
static void
_start_daemonize(void)
{
pid_t pid;
struct sigaction sa;
/* Create pipe for communicating with child during daemonization. */
zed_log_pipe_open();
/* Background process and ensure child is not process group leader. */
pid = fork();
if (pid < 0) {
zed_log_die("Failed to create child process: %s",
strerror(errno));
} else if (pid > 0) {
/* Close writes since parent will only read from pipe. */
zed_log_pipe_close_writes();
/* Wait for notification that daemonization is complete. */
zed_log_pipe_wait();
zed_log_pipe_close_reads();
_exit(EXIT_SUCCESS);
}
/* Close reads since child will only write to pipe. */
zed_log_pipe_close_reads();
/* Create independent session and detach from terminal. */
if (setsid() < 0)
zed_log_die("Failed to create new session: %s",
strerror(errno));
/* Prevent child from terminating on HUP when session leader exits. */
if (sigemptyset(&sa.sa_mask) < 0)
zed_log_die("Failed to initialize sigset");
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGHUP, &sa, NULL) < 0)
zed_log_die("Failed to ignore SIGHUP");
/* Ensure process cannot re-acquire terminal. */
pid = fork();
if (pid < 0) {
zed_log_die("Failed to create grandchild process: %s",
strerror(errno));
} else if (pid > 0) {
_exit(EXIT_SUCCESS);
}
}
/*
* Finish daemonization of the process by closing stdin/stdout/stderr.
*
* This must be called at the end of initialization after all external
* communication channels are established and accessible.
*/
static void
_finish_daemonize(void)
{
int devnull;
/* Preserve fd 0/1/2, but discard data to/from stdin/stdout/stderr. */
devnull = open("/dev/null", O_RDWR);
if (devnull < 0)
zed_log_die("Failed to open /dev/null: %s", strerror(errno));
if (dup2(devnull, STDIN_FILENO) < 0)
zed_log_die("Failed to dup /dev/null onto stdin: %s",
strerror(errno));
if (dup2(devnull, STDOUT_FILENO) < 0)
zed_log_die("Failed to dup /dev/null onto stdout: %s",
strerror(errno));
if (dup2(devnull, STDERR_FILENO) < 0)
zed_log_die("Failed to dup /dev/null onto stderr: %s",
strerror(errno));
if ((devnull > STDERR_FILENO) && (close(devnull) < 0))
zed_log_die("Failed to close /dev/null: %s", strerror(errno));
/* Notify parent that daemonization is complete. */
zed_log_pipe_close_writes();
}
/*
* ZFS Event Daemon (ZED).
*/
int
main(int argc, char *argv[])
{
struct zed_conf zcp;
uint64_t saved_eid;
int64_t saved_etime[2];
zed_log_init(argv[0]);
zed_log_stderr_open(LOG_NOTICE);
zed_conf_init(&zcp);
zed_conf_parse_opts(&zcp, argc, argv);
if (zcp.do_verbose)
zed_log_stderr_open(LOG_INFO);
if (geteuid() != 0)
zed_log_die("Must be run as root");
zed_file_close_from(STDERR_FILENO + 1);
(void) umask(0);
if (chdir("/") < 0)
zed_log_die("Failed to change to root directory");
if (zed_conf_scan_dir(&zcp) < 0)
exit(EXIT_FAILURE);
if (!zcp.do_foreground) {
_start_daemonize();
zed_log_syslog_open(LOG_DAEMON);
}
_setup_sig_handlers();
if (zcp.do_memlock)
_lock_memory();
if ((zed_conf_write_pid(&zcp) < 0) && (!zcp.do_force))
exit(EXIT_FAILURE);
if (!zcp.do_foreground)
_finish_daemonize();
zed_log_msg(LOG_NOTICE,
"ZFS Event Daemon %s-%s (PID %d)",
ZFS_META_VERSION, ZFS_META_RELEASE, (int)getpid());
if (zed_conf_open_state(&zcp) < 0)
exit(EXIT_FAILURE);
if (zed_conf_read_state(&zcp, &saved_eid, saved_etime) < 0)
exit(EXIT_FAILURE);
idle:
/*
* If -I is specified, attempt to open /dev/zfs repeatedly until
* successful.
*/
do {
if (!zed_event_init(&zcp))
break;
/* Wait for some time and try again. tunable? */
sleep(30);
} while (!_got_exit && zcp.do_idle);
if (_got_exit)
goto out;
zed_event_seek(&zcp, saved_eid, saved_etime);
while (!_got_exit) {
int rv;
if (_got_hup) {
_got_hup = 0;
(void) zed_conf_scan_dir(&zcp);
}
rv = zed_event_service(&zcp);
/* ENODEV: When kernel module is unloaded (osx) */
- if (rv == ENODEV)
+ if (rv != 0)
break;
}
zed_log_msg(LOG_NOTICE, "Exiting");
zed_event_fini(&zcp);
if (zcp.do_idle && !_got_exit)
goto idle;
out:
zed_conf_destroy(&zcp);
zed_log_fini();
exit(EXIT_SUCCESS);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh b/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh
index b07cf0f295ad..ea108c47b779 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/all-syslog.sh
@@ -1,51 +1,51 @@
#!/bin/sh
#
# Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
# Copyright (c) 2020 by Delphix. All rights reserved.
#
#
# Log the zevent via syslog.
#
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
zed_exit_if_ignoring_this_event
# build a string of name=value pairs for this event
msg="eid=${ZEVENT_EID} class=${ZEVENT_SUBCLASS}"
if [ "${ZED_SYSLOG_DISPLAY_GUIDS}" = "1" ]; then
[ -n "${ZEVENT_POOL_GUID}" ] && msg="${msg} pool_guid=${ZEVENT_POOL_GUID}"
[ -n "${ZEVENT_VDEV_GUID}" ] && msg="${msg} vdev_guid=${ZEVENT_VDEV_GUID}"
else
[ -n "${ZEVENT_POOL}" ] && msg="${msg} pool='${ZEVENT_POOL}'"
- [ -n "${ZEVENT_VDEV_PATH}" ] && msg="${msg} vdev=$(basename "${ZEVENT_VDEV_PATH}")"
+ [ -n "${ZEVENT_VDEV_PATH}" ] && msg="${msg} vdev=${ZEVENT_VDEV_PATH##*/}"
fi
# log pool state if state is anything other than 'ACTIVE'
[ -n "${ZEVENT_POOL_STATE_STR}" ] && [ "$ZEVENT_POOL_STATE" -ne 0 ] && \
msg="${msg} pool_state=${ZEVENT_POOL_STATE_STR}"
# Log the following payload nvpairs if they are present
[ -n "${ZEVENT_VDEV_STATE_STR}" ] && msg="${msg} vdev_state=${ZEVENT_VDEV_STATE_STR}"
[ -n "${ZEVENT_CKSUM_ALGORITHM}" ] && msg="${msg} algorithm=${ZEVENT_CKSUM_ALGORITHM}"
[ -n "${ZEVENT_ZIO_SIZE}" ] && msg="${msg} size=${ZEVENT_ZIO_SIZE}"
[ -n "${ZEVENT_ZIO_OFFSET}" ] && msg="${msg} offset=${ZEVENT_ZIO_OFFSET}"
[ -n "${ZEVENT_ZIO_PRIORITY}" ] && msg="${msg} priority=${ZEVENT_ZIO_PRIORITY}"
[ -n "${ZEVENT_ZIO_ERR}" ] && msg="${msg} err=${ZEVENT_ZIO_ERR}"
[ -n "${ZEVENT_ZIO_FLAGS}" ] && msg="${msg} flags=$(printf '0x%x' "${ZEVENT_ZIO_FLAGS}")"
# log delays that are >= 10 milliseconds
[ -n "${ZEVENT_ZIO_DELAY}" ] && [ "$ZEVENT_ZIO_DELAY" -gt 10000000 ] && \
msg="${msg} delay=$((ZEVENT_ZIO_DELAY / 1000000))ms"
# list the bookmark data together
# shellcheck disable=SC2153
[ -n "${ZEVENT_ZIO_OBJSET}" ] && \
msg="${msg} bookmark=${ZEVENT_ZIO_OBJSET}:${ZEVENT_ZIO_OBJECT}:${ZEVENT_ZIO_LEVEL}:${ZEVENT_ZIO_BLKID}"
zed_log_msg "${msg}"
exit 0
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/generic-notify.sh b/sys/contrib/openzfs/cmd/zed/zed.d/generic-notify.sh
index 1db26980c1a0..9cf657e39970 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/generic-notify.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/generic-notify.sh
@@ -1,54 +1,54 @@
#!/bin/sh
#
# Send notification in response to a given zevent.
#
# This is a generic script that can be symlinked to a file in the
# enabled-zedlets directory to have a notification sent when a particular
# class of zevents occurs. The symlink filename must begin with the zevent
# (sub)class string (e.g., "probe_failure-notify.sh" for the "probe_failure"
# subclass). Refer to the zed(8) manpage for details.
#
# Only one notification per ZED_NOTIFY_INTERVAL_SECS will be sent for a given
# class/pool combination. This protects against spamming the recipient
# should multiple events occur together in time for the same pool.
#
# Exit codes:
# 0: notification sent
# 1: notification failed
# 2: notification not configured
# 3: notification suppressed
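# Illustrative setup sketch (paths are examples, not part of this script):
# to send notifications for "probe_failure" events, symlink this script into
# the enabled-zedlets directory under a matching name, e.g.
#   ln -s /usr/libexec/zfs/zed.d/generic-notify.sh \
#       /etc/zfs/zed.d/probe_failure-notify.sh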
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
# Rate-limit the notification based in part on the filename.
#
-rate_limit_tag="${ZEVENT_POOL};${ZEVENT_SUBCLASS};$(basename -- "$0")"
+rate_limit_tag="${ZEVENT_POOL};${ZEVENT_SUBCLASS};${0##*/}"
rate_limit_interval="${ZED_NOTIFY_INTERVAL_SECS}"
zed_rate_limit "${rate_limit_tag}" "${rate_limit_interval}" || exit 3
umask 077
pool_str="${ZEVENT_POOL:+" for ${ZEVENT_POOL}"}"
host_str=" on $(hostname)"
note_subject="ZFS ${ZEVENT_SUBCLASS} event${pool_str}${host_str}"
note_pathname="$(mktemp)"
{
echo "ZFS has posted the following event:"
echo
echo " eid: ${ZEVENT_EID}"
echo " class: ${ZEVENT_SUBCLASS}"
echo " host: $(hostname)"
echo " time: ${ZEVENT_TIME_STRING}"
[ -n "${ZEVENT_VDEV_TYPE}" ] && echo " vtype: ${ZEVENT_VDEV_TYPE}"
[ -n "${ZEVENT_VDEV_PATH}" ] && echo " vpath: ${ZEVENT_VDEV_PATH}"
[ -n "${ZEVENT_VDEV_GUID}" ] && echo " vguid: ${ZEVENT_VDEV_GUID}"
[ -n "${ZEVENT_POOL}" ] && [ -x "${ZPOOL}" ] \
&& "${ZPOOL}" status "${ZEVENT_POOL}"
} > "${note_pathname}"
zed_notify "${note_subject}" "${note_pathname}"; rv=$?
rm -f "${note_pathname}"
exit "${rv}"
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-notify.sh b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-notify.sh
index 76f09061c5b5..ab11dfbc99d5 100755
--- a/sys/contrib/openzfs/cmd/zed/zed.d/statechange-notify.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/statechange-notify.sh
@@ -1,74 +1,75 @@
#!/bin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License Version 1.0 (CDDL-1.0).
# You can obtain a copy of the license from the top-level file
# "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
# You may not use this file except in compliance with the license.
#
# CDDL HEADER END
#
#
# Send notification in response to a fault-induced statechange
#
# ZEVENT_SUBCLASS: 'statechange'
-# ZEVENT_VDEV_STATE_STR: 'DEGRADED', 'FAULTED' or 'REMOVED'
+# ZEVENT_VDEV_STATE_STR: 'DEGRADED', 'FAULTED', 'REMOVED', or 'UNAVAIL'
#
# Exit codes:
# 0: notification sent
# 1: notification failed
# 2: notification not configured
# 3: statechange not relevant
# 4: statechange string missing (unexpected)
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
[ -n "${ZEVENT_VDEV_STATE_STR}" ] || exit 4
if [ "${ZEVENT_VDEV_STATE_STR}" != "FAULTED" ] \
&& [ "${ZEVENT_VDEV_STATE_STR}" != "DEGRADED" ] \
- && [ "${ZEVENT_VDEV_STATE_STR}" != "REMOVED" ]; then
+ && [ "${ZEVENT_VDEV_STATE_STR}" != "REMOVED" ] \
+ && [ "${ZEVENT_VDEV_STATE_STR}" != "UNAVAIL" ]; then
exit 3
fi
umask 077
note_subject="ZFS device fault for pool ${ZEVENT_POOL_GUID} on $(hostname)"
note_pathname="$(mktemp)"
{
if [ "${ZEVENT_VDEV_STATE_STR}" = "FAULTED" ] ; then
echo "The number of I/O errors associated with a ZFS device exceeded"
echo "acceptable levels. ZFS has marked the device as faulted."
elif [ "${ZEVENT_VDEV_STATE_STR}" = "DEGRADED" ] ; then
echo "The number of checksum errors associated with a ZFS device"
echo "exceeded acceptable levels. ZFS has marked the device as"
echo "degraded."
else
echo "ZFS has detected that a device was removed."
fi
echo
echo " impact: Fault tolerance of the pool may be compromised."
echo " eid: ${ZEVENT_EID}"
echo " class: ${ZEVENT_SUBCLASS}"
echo " state: ${ZEVENT_VDEV_STATE_STR}"
echo " host: $(hostname)"
echo " time: ${ZEVENT_TIME_STRING}"
[ -n "${ZEVENT_VDEV_TYPE}" ] && echo " vtype: ${ZEVENT_VDEV_TYPE}"
[ -n "${ZEVENT_VDEV_PATH}" ] && echo " vpath: ${ZEVENT_VDEV_PATH}"
[ -n "${ZEVENT_VDEV_PHYSPATH}" ] && echo " vphys: ${ZEVENT_VDEV_PHYSPATH}"
[ -n "${ZEVENT_VDEV_GUID}" ] && echo " vguid: ${ZEVENT_VDEV_GUID}"
[ -n "${ZEVENT_VDEV_DEVID}" ] && echo " devid: ${ZEVENT_VDEV_DEVID}"
echo " pool: ${ZEVENT_POOL_GUID}"
} > "${note_pathname}"
zed_notify "${note_subject}" "${note_pathname}"; rv=$?
rm -f "${note_pathname}"
exit "${rv}"
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh b/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
index d1ebf7dbcc16..290f9150b43f 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed-functions.sh
@@ -1,532 +1,614 @@
#!/bin/sh
# shellcheck disable=SC2039
# zed-functions.sh
#
# ZED helper functions for use in ZEDLETs
# Variable Defaults
#
: "${ZED_LOCKDIR:="/var/lock"}"
: "${ZED_NOTIFY_INTERVAL_SECS:=3600}"
: "${ZED_NOTIFY_VERBOSE:=0}"
: "${ZED_RUNDIR:="/var/run"}"
: "${ZED_SYSLOG_PRIORITY:="daemon.notice"}"
: "${ZED_SYSLOG_TAG:="zed"}"
ZED_FLOCK_FD=8
# zed_check_cmd (cmd, ...)
#
# For each argument given, search PATH for the executable command [cmd].
# Log a message if [cmd] is not found.
#
# Arguments
# cmd: name of executable command for which to search
#
# Return
# 0 if all commands are found in PATH and are executable
# n for a count of the command executables that are not found
#
zed_check_cmd()
{
local cmd
local rv=0
for cmd; do
if ! command -v "${cmd}" >/dev/null 2>&1; then
zed_log_err "\"${cmd}\" not installed"
rv=$((rv + 1))
fi
done
return "${rv}"
}
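# Example (illustrative): a zedlet can verify its external dependencies up
# front, e.g.
#   zed_check_cmd "curl" "sed" || exit 1
# which logs one "not installed" error per missing command and returns the
# number of commands that were not found.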
# zed_log_msg (msg, ...)
#
# Write all argument strings to the system log.
#
# Globals
# ZED_SYSLOG_PRIORITY
# ZED_SYSLOG_TAG
#
# Return
# nothing
#
zed_log_msg()
{
logger -p "${ZED_SYSLOG_PRIORITY}" -t "${ZED_SYSLOG_TAG}" -- "$@"
}
# zed_log_err (msg, ...)
#
# Write an error message to the system log. This message will contain the
# script name, EID, and all argument strings.
#
# Globals
# ZED_SYSLOG_PRIORITY
# ZED_SYSLOG_TAG
# ZEVENT_EID
#
# Return
# nothing
#
zed_log_err()
{
logger -p "${ZED_SYSLOG_PRIORITY}" -t "${ZED_SYSLOG_TAG}" -- "error:" \
- "$(basename -- "$0"):""${ZEVENT_EID:+" eid=${ZEVENT_EID}:"}" "$@"
+ "${0##*/}:""${ZEVENT_EID:+" eid=${ZEVENT_EID}:"}" "$@"
}
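# Example (illustrative) of how a zedlet would use the two helpers above:
#   zed_log_msg "eid=${ZEVENT_EID} class=${ZEVENT_SUBCLASS}"
#   zed_log_err "unable to determine vdev path"
# Both go to syslog with the ZED_SYSLOG_PRIORITY/ZED_SYSLOG_TAG settings; the
# error variant is additionally prefixed with the script name and event ID.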
# zed_lock (lockfile, [fd])
#
# Obtain an exclusive (write) lock on [lockfile]. If the lock cannot be
# immediately acquired, wait until it becomes available.
#
# Every zed_lock() must be paired with a corresponding zed_unlock().
#
# By default, flock-style locks associate the lockfile with file descriptor 8.
# The bash manpage warns that file descriptors >9 should be used with care as
# they may conflict with file descriptors used internally by the shell. File
# descriptor 9 is reserved for zed_rate_limit(). If concurrent locks are held
# within the same process, they must use different file descriptors (preferably
# decrementing from 8); otherwise, obtaining a new lock with a given file
# descriptor will release the previous lock associated with that descriptor.
#
# Arguments
# lockfile: pathname of the lock file; the lock will be stored in
# ZED_LOCKDIR unless the pathname contains a "/".
# fd: integer for the file descriptor used by flock (OPTIONAL unless holding
# concurrent locks)
#
# Globals
# ZED_FLOCK_FD
# ZED_LOCKDIR
#
# Return
# nothing
#
zed_lock()
{
local lockfile="$1"
local fd="${2:-${ZED_FLOCK_FD}}"
local umask_bak
local err
[ -n "${lockfile}" ] || return
if ! expr "${lockfile}" : '.*/' >/dev/null 2>&1; then
lockfile="${ZED_LOCKDIR}/${lockfile}"
fi
umask_bak="$(umask)"
umask 077
# Obtain a lock on the file bound to the given file descriptor.
#
eval "exec ${fd}>> '${lockfile}'"
if ! err="$(flock --exclusive "${fd}" 2>&1)"; then
zed_log_err "failed to lock \"${lockfile}\": ${err}"
fi
umask "${umask_bak}"
}
# zed_unlock (lockfile, [fd])
#
# Release the lock on [lockfile].
#
# Arguments
# lockfile: pathname of the lock file
# fd: integer for the file descriptor used by flock (must match the file
# descriptor passed to the zed_lock function call)
#
# Globals
# ZED_FLOCK_FD
# ZED_LOCKDIR
#
# Return
# nothing
#
zed_unlock()
{
local lockfile="$1"
local fd="${2:-${ZED_FLOCK_FD}}"
local err
[ -n "${lockfile}" ] || return
if ! expr "${lockfile}" : '.*/' >/dev/null 2>&1; then
lockfile="${ZED_LOCKDIR}/${lockfile}"
fi
# Release the lock and close the file descriptor.
if ! err="$(flock --unlock "${fd}" 2>&1)"; then
zed_log_err "failed to unlock \"${lockfile}\": ${err}"
fi
eval "exec ${fd}>&-"
}
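# Example (illustrative; the lockfile name, fd, and state file are
# hypothetical): serializing updates to a shared file from a zedlet:
#   zed_lock "my.zedlet.lock" 7
#   echo "${ZEVENT_EID}" >> "${ZED_RUNDIR}/my.zedlet.state"
#   zed_unlock "my.zedlet.lock" 7
# The same file descriptor must be passed to the matching zed_unlock() call
# whenever the default (8) is overridden.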
# zed_notify (subject, pathname)
#
# Send a notification via all available methods.
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Return
# 0: notification succeeded via at least one method
# 1: notification failed
# 2: no notification methods configured
#
zed_notify()
{
local subject="$1"
local pathname="$2"
local num_success=0
local num_failure=0
zed_notify_email "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
zed_notify_pushbullet "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
zed_notify_slack_webhook "${subject}" "${pathname}"; rv=$?
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
+ zed_notify_pushover "${subject}" "${pathname}"; rv=$?
+ [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
+ [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
+
[ "${num_success}" -gt 0 ] && return 0
[ "${num_failure}" -gt 0 ] && return 1
return 2
}
# zed_notify_email (subject, pathname)
#
# Send a notification via email to the address specified by ZED_EMAIL_ADDR.
#
# Requires the mail executable to be installed in the standard PATH, or
# ZED_EMAIL_PROG to be defined with the pathname of an executable capable of
# reading a message body from stdin.
#
# Command-line options to the mail executable can be specified in
# ZED_EMAIL_OPTS. This undergoes the following keyword substitutions:
# - @ADDRESS@ is replaced with the space-delimited recipient email address(es)
# - @SUBJECT@ is replaced with the notification subject
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_EMAIL_PROG
# ZED_EMAIL_OPTS
# ZED_EMAIL_ADDR
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_email()
{
local subject="$1"
local pathname="${2:-"/dev/null"}"
: "${ZED_EMAIL_PROG:="mail"}"
: "${ZED_EMAIL_OPTS:="-s '@SUBJECT@' @ADDRESS@"}"
# For backward compatibility with ZED_EMAIL.
if [ -n "${ZED_EMAIL}" ] && [ -z "${ZED_EMAIL_ADDR}" ]; then
ZED_EMAIL_ADDR="${ZED_EMAIL}"
fi
[ -n "${ZED_EMAIL_ADDR}" ] || return 2
zed_check_cmd "${ZED_EMAIL_PROG}" || return 1
[ -n "${subject}" ] || return 1
if [ ! -r "${pathname}" ]; then
zed_log_err \
- "$(basename "${ZED_EMAIL_PROG}") cannot read \"${pathname}\""
+ "${ZED_EMAIL_PROG##*/} cannot read \"${pathname}\""
return 1
fi
ZED_EMAIL_OPTS="$(echo "${ZED_EMAIL_OPTS}" \
| sed -e "s/@ADDRESS@/${ZED_EMAIL_ADDR}/g" \
-e "s/@SUBJECT@/${subject}/g")"
# shellcheck disable=SC2086
eval ${ZED_EMAIL_PROG} ${ZED_EMAIL_OPTS} < "${pathname}" >/dev/null 2>&1
rv=$?
if [ "${rv}" -ne 0 ]; then
- zed_log_err "$(basename "${ZED_EMAIL_PROG}") exit=${rv}"
+ zed_log_err "${ZED_EMAIL_PROG##*/} exit=${rv}"
return 1
fi
return 0
}
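# Example (illustrative) of the substitution performed above, assuming the
# hypothetical settings ZED_EMAIL_ADDR="root" and the default ZED_EMAIL_OPTS:
#   -s '@SUBJECT@' @ADDRESS@  ->  -s 'ZFS checksum event for tank on host1' root
# so the notification is delivered via:
#   mail -s 'ZFS checksum event for tank on host1' root < "${pathname}"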
# zed_notify_pushbullet (subject, pathname)
#
# Send a notification via Pushbullet <https://www.pushbullet.com/>.
# The access token (ZED_PUSHBULLET_ACCESS_TOKEN) identifies this client to the
# Pushbullet server. The optional channel tag (ZED_PUSHBULLET_CHANNEL_TAG) is
# for pushing to notification feeds that can be subscribed to; if a channel is
# not defined, push notifications will instead be sent to all devices
# associated with the account specified by the access token.
#
# Requires awk, curl, and sed executables to be installed in the standard PATH.
#
# References
# https://docs.pushbullet.com/
# https://www.pushbullet.com/security
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_PUSHBULLET_ACCESS_TOKEN
# ZED_PUSHBULLET_CHANNEL_TAG
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_pushbullet()
{
local subject="$1"
local pathname="${2:-"/dev/null"}"
local msg_body
local msg_tag
local msg_json
local msg_out
local msg_err
local url="https://api.pushbullet.com/v2/pushes"
[ -n "${ZED_PUSHBULLET_ACCESS_TOKEN}" ] || return 2
[ -n "${subject}" ] || return 1
if [ ! -r "${pathname}" ]; then
zed_log_err "pushbullet cannot read \"${pathname}\""
return 1
fi
zed_check_cmd "awk" "curl" "sed" || return 1
# Escape the following characters in the message body for JSON:
# newline, backslash, double quote, horizontal tab, form feed,
# and carriage return.
#
msg_body="$(awk '{ ORS="\\n" } { gsub(/\\/, "\\\\"); gsub(/"/, "\\\"");
gsub(/\t/, "\\t"); gsub(/\f/, "\\f"); gsub(/\r/, "\\r"); print }' \
"${pathname}")"
# Push to a channel if one is configured.
#
[ -n "${ZED_PUSHBULLET_CHANNEL_TAG}" ] && msg_tag="$(printf \
'"channel_tag": "%s", ' "${ZED_PUSHBULLET_CHANNEL_TAG}")"
# Construct the JSON message for pushing a note.
#
msg_json="$(printf '{%s"type": "note", "title": "%s", "body": "%s"}' \
"${msg_tag}" "${subject}" "${msg_body}")"
# Send the POST request and check for errors.
#
msg_out="$(curl -u "${ZED_PUSHBULLET_ACCESS_TOKEN}:" -X POST "${url}" \
--header "Content-Type: application/json" --data-binary "${msg_json}" \
2>/dev/null)"; rv=$?
if [ "${rv}" -ne 0 ]; then
zed_log_err "curl exit=${rv}"
return 1
fi
msg_err="$(echo "${msg_out}" \
| sed -n -e 's/.*"error" *:.*"message" *: *"\([^"]*\)".*/\1/p')"
if [ -n "${msg_err}" ]; then
zed_log_err "pushbullet \"${msg_err}"\"
return 1
fi
return 0
}
# zed_notify_slack_webhook (subject, pathname)
#
# Notification via Slack Webhook <https://api.slack.com/incoming-webhooks>.
# The Webhook URL (ZED_SLACK_WEBHOOK_URL) identifies this client to the
# Slack channel.
#
# Requires awk, curl, and sed executables to be installed in the standard PATH.
#
# References
# https://api.slack.com/incoming-webhooks
#
# Arguments
# subject: notification subject
# pathname: pathname containing the notification message (OPTIONAL)
#
# Globals
# ZED_SLACK_WEBHOOK_URL
#
# Return
# 0: notification sent
# 1: notification failed
# 2: not configured
#
zed_notify_slack_webhook()
{
[ -n "${ZED_SLACK_WEBHOOK_URL}" ] || return 2
local subject="$1"
local pathname="${2:-"/dev/null"}"
local msg_body
local msg_tag
local msg_json
local msg_out
local msg_err
local url="${ZED_SLACK_WEBHOOK_URL}"
[ -n "${subject}" ] || return 1
if [ ! -r "${pathname}" ]; then
zed_log_err "slack webhook cannot read \"${pathname}\""
return 1
fi
zed_check_cmd "awk" "curl" "sed" || return 1
# Escape the following characters in the message body for JSON:
# newline, backslash, double quote, horizontal tab, form feed,
# and carriage return.
#
msg_body="$(awk '{ ORS="\\n" } { gsub(/\\/, "\\\\"); gsub(/"/, "\\\"");
gsub(/\t/, "\\t"); gsub(/\f/, "\\f"); gsub(/\r/, "\\r"); print }' \
"${pathname}")"
# Construct the JSON message for posting.
#
- msg_json="$(printf '{"text": "*%s*\n%s"}' "${subject}" "${msg_body}" )"
+ msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )"
# Send the POST request and check for errors.
#
msg_out="$(curl -X POST "${url}" \
--header "Content-Type: application/json" --data-binary "${msg_json}" \
2>/dev/null)"; rv=$?
if [ "${rv}" -ne 0 ]; then
zed_log_err "curl exit=${rv}"
return 1
fi
msg_err="$(echo "${msg_out}" \
| sed -n -e 's/.*"error" *:.*"message" *: *"\([^"]*\)".*/\1/p')"
if [ -n "${msg_err}" ]; then
zed_log_err "slack webhook \"${msg_err}"\"
return 1
fi
return 0
}
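# Example (illustrative) of the JSON payload constructed above; the escaped
# "\n" between subject and body must reach Slack literally so it is rendered
# as a line break:
#   {"text": "*ZFS checksum event for tank on host1*\nZFS has posted ..."}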
+# zed_notify_pushover (subject, pathname)
+#
+# Send a notification via Pushover <https://pushover.net/>.
+# The access token (ZED_PUSHOVER_TOKEN) identifies this client to the
+# Pushover server. The user token (ZED_PUSHOVER_USER) defines the user or
+# group to which the notification will be sent.
+#
+# Requires curl and sed executables to be installed in the standard PATH.
+#
+# References
+# https://pushover.net/api
+#
+# Arguments
+# subject: notification subject
+# pathname: pathname containing the notification message (OPTIONAL)
+#
+# Globals
+# ZED_PUSHOVER_TOKEN
+# ZED_PUSHOVER_USER
+#
+# Return
+# 0: notification sent
+# 1: notification failed
+# 2: not configured
+#
+zed_notify_pushover()
+{
+ local subject="$1"
+ local pathname="${2:-"/dev/null"}"
+ local msg_body
+ local msg_out
+ local msg_err
+ local url="https://api.pushover.net/1/messages.json"
+
+ [ -n "${ZED_PUSHOVER_TOKEN}" ] && [ -n "${ZED_PUSHOVER_USER}" ] || return 2
+
+ if [ ! -r "${pathname}" ]; then
+ zed_log_err "pushover cannot read \"${pathname}\""
+ return 1
+ fi
+
+ zed_check_cmd "curl" "sed" || return 1
+
+ # Read the message body in.
+ #
+ msg_body="$(cat "${pathname}")"
+
+ if [ -z "${msg_body}" ]
+ then
+ msg_body=$subject
+ subject=""
+ fi
+
+ # Send the POST request and check for errors.
+ #
+ msg_out="$( \
+ curl \
+ --form-string "token=${ZED_PUSHOVER_TOKEN}" \
+ --form-string "user=${ZED_PUSHOVER_USER}" \
+ --form-string "message=${msg_body}" \
+ --form-string "title=${subject}" \
+ "${url}" \
+ 2>/dev/null \
+ )"; rv=$?
+ if [ "${rv}" -ne 0 ]; then
+ zed_log_err "curl exit=${rv}"
+ return 1
+ fi
+ msg_err="$(echo "${msg_out}" \
+ | sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
+ if [ -n "${msg_err}" ]; then
+ zed_log_err "pushover \"${msg_err}"\"
+ return 1
+ fi
+ return 0
+}
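# Example (illustrative; both values are placeholders): Pushover delivery is
# enabled purely through zed.rc, after which every zedlet that calls
# zed_notify() will also post here:
#   ZED_PUSHOVER_TOKEN="your-application-token"
#   ZED_PUSHOVER_USER="your-user-or-group-key"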
+
+
# zed_rate_limit (tag, [interval])
#
# Check whether an event of a given type [tag] has already occurred within the
# last [interval] seconds.
#
# This function obtains a lock on the statefile using file descriptor 9.
#
# Arguments
# tag: arbitrary string for grouping related events to rate-limit
# interval: time interval in seconds (OPTIONAL)
#
# Globals
# ZED_NOTIFY_INTERVAL_SECS
# ZED_RUNDIR
#
# Return
# 0 if the event should be processed
# 1 if the event should be dropped
#
# State File Format
# time;tag
#
zed_rate_limit()
{
local tag="$1"
local interval="${2:-${ZED_NOTIFY_INTERVAL_SECS}}"
local lockfile="zed.zedlet.state.lock"
local lockfile_fd=9
local statefile="${ZED_RUNDIR}/zed.zedlet.state"
local time_now
local time_prev
local umask_bak
local rv=0
[ -n "${tag}" ] || return 0
zed_lock "${lockfile}" "${lockfile_fd}"
time_now="$(date +%s)"
time_prev="$(grep -E "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
| tail -1 | cut -d\; -f1)"
if [ -n "${time_prev}" ] \
&& [ "$((time_now - time_prev))" -lt "${interval}" ]; then
rv=1
else
umask_bak="$(umask)"
umask 077
grep -E -v "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \
> "${statefile}.$$"
echo "${time_now};${tag}" >> "${statefile}.$$"
mv -f "${statefile}.$$" "${statefile}"
umask "${umask_bak}"
fi
zed_unlock "${lockfile}" "${lockfile_fd}"
return "${rv}"
}
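# Example (illustrative): drop repeat notifications for the same pool/zedlet
# combination within the configured interval:
#   zed_rate_limit "tank;statechange;my-notify.sh" || exit 3
# Each processed tag is recorded in "${ZED_RUNDIR}/zed.zedlet.state" as a
# "time;tag" line, e.g. "1700000000;tank;statechange;my-notify.sh".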
# zed_guid_to_pool (guid)
#
# Convert a pool GUID into its pool name (like "tank")
# Arguments
# guid: pool GUID (decimal or hex)
#
# Return
# Pool name
#
zed_guid_to_pool()
{
if [ -z "$1" ] ; then
return
fi
guid="$(printf "%u" "$1")"
$ZPOOL get -H -ovalue,name guid | awk '$1 == '"$guid"' {print $2; exit}'
}
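# Example (illustrative, hypothetical GUID): resolve the pool GUID carried by
# a zevent back to its pool name:
#   pool="$(zed_guid_to_pool "${ZEVENT_POOL_GUID}")"   # e.g. "tank"
# The GUID may be supplied in decimal or hex; an empty argument produces no
# output.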
# zed_exit_if_ignoring_this_event
#
# Exit the script if we should ignore this event, as determined by
# $ZED_SYSLOG_SUBCLASS_INCLUDE and $ZED_SYSLOG_SUBCLASS_EXCLUDE in zed.rc.
# This function assumes you've imported the normal zed variables.
zed_exit_if_ignoring_this_event()
{
if [ -n "${ZED_SYSLOG_SUBCLASS_INCLUDE}" ]; then
eval "case ${ZEVENT_SUBCLASS} in
${ZED_SYSLOG_SUBCLASS_INCLUDE});;
*) exit 0;;
esac"
elif [ -n "${ZED_SYSLOG_SUBCLASS_EXCLUDE}" ]; then
eval "case ${ZEVENT_SUBCLASS} in
${ZED_SYSLOG_SUBCLASS_EXCLUDE}) exit 0;;
*);;
esac"
fi
}
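# Example (illustrative): with ZED_SYSLOG_SUBCLASS_INCLUDE="checksum|scrub_*"
# set in zed.rc, the eval above expands to
#   case ${ZEVENT_SUBCLASS} in
#       checksum|scrub_*);;
#       *) exit 0;;
#   esac
# so only checksum and scrub_* events reach the remainder of the caller.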
diff --git a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
index 1c278b2ef96e..9ac77f929c73 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
+++ b/sys/contrib/openzfs/cmd/zed/zed.d/zed.rc
@@ -1,127 +1,144 @@
##
# zed.rc
#
# This file should be owned by root and permissioned 0600.
##
##
# Absolute path to the debug output file.
#
#ZED_DEBUG_LOG="/tmp/zed.debug.log"
##
# Email address of the zpool administrator for receipt of notifications;
# multiple addresses can be specified if they are delimited by whitespace.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
-# Disabled by default; uncomment to enable.
+# Enabled by default; comment to disable.
#
-#ZED_EMAIL_ADDR="root"
+ZED_EMAIL_ADDR="root"
##
# Name or path of executable responsible for sending notifications via email;
# the mail program must be capable of reading a message body from stdin.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
#
#ZED_EMAIL_PROG="mail"
##
# Command-line options for ZED_EMAIL_PROG.
# The string @ADDRESS@ will be replaced with the recipient email address(es).
# The string @SUBJECT@ will be replaced with the notification subject;
# this should be protected with quotes to prevent word-splitting.
# Email will only be sent if ZED_EMAIL_ADDR is defined.
#
#ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@"
##
# Default directory for zed lock files.
#
#ZED_LOCKDIR="/var/lock"
##
# Minimum number of seconds between notifications for a similar event.
#
#ZED_NOTIFY_INTERVAL_SECS=3600
##
# Notification verbosity.
# If set to 0, suppress notification if the pool is healthy.
# If set to 1, send notification regardless of pool health.
#
#ZED_NOTIFY_VERBOSE=0
##
# Send notifications for 'ereport.fs.zfs.data' events.
# Disabled by default, any non-empty value will enable the feature.
#
#ZED_NOTIFY_DATA=
##
# Pushbullet access token.
# This grants full access to your account -- protect it accordingly!
# <https://www.pushbullet.com/get-started>
# <https://www.pushbullet.com/account>
# Disabled by default; uncomment to enable.
#
#ZED_PUSHBULLET_ACCESS_TOKEN=""
##
# Pushbullet channel tag for push notification feeds that can be subscribed to.
# <https://www.pushbullet.com/my-channel>
# If not defined, push notifications will instead be sent to all devices
# associated with the account specified by the access token.
# Disabled by default; uncomment to enable.
#
#ZED_PUSHBULLET_CHANNEL_TAG=""
##
# Slack Webhook URL.
# This allows posting to the given channel and includes an access token.
# <https://api.slack.com/incoming-webhooks>
# Disabled by default; uncomment to enable.
#
#ZED_SLACK_WEBHOOK_URL=""
+##
+# Pushover token.
+# This defines the application from which the notification will be sent.
+# <https://pushover.net/api#registration>
+# Disabled by default; uncomment to enable.
+# ZED_PUSHOVER_USER, below, must also be configured.
+#
+#ZED_PUSHOVER_TOKEN=""
+
+##
+# Pushover user key.
+# This defines which user or group will receive Pushover notifications.
+# <https://pushover.net/api#identifiers>
+# Disabled by default; uncomment to enable.
+# ZED_PUSHOVER_TOKEN, above, must also be configured.
+#ZED_PUSHOVER_USER=""
+
##
# Default directory for zed state files.
#
#ZED_RUNDIR="/var/run"
##
# Turn on/off enclosure LEDs when drives get DEGRADED/FAULTED. This works for
# device mapper and multipath devices as well. This works with JBOD enclosures
# and NVMe PCI drives (assuming they're supported by Linux in sysfs).
#
ZED_USE_ENCLOSURE_LEDS=1
##
# Run a scrub after every resilver
# Disabled by default, 1 to enable and 0 to disable.
#ZED_SCRUB_AFTER_RESILVER=0
##
# The syslog priority (e.g., specified as a "facility.level" pair).
#
#ZED_SYSLOG_PRIORITY="daemon.notice"
##
# The syslog tag for marking zed events.
#
#ZED_SYSLOG_TAG="zed"
##
# Which set of event subclasses to log
# By default, events from all subclasses are logged.
# If ZED_SYSLOG_SUBCLASS_INCLUDE is set, only subclasses
# matching the pattern are logged. Use the pipe symbol (|)
# or shell wildcards (*, ?) to match multiple subclasses.
# Otherwise, if ZED_SYSLOG_SUBCLASS_EXCLUDE is set, the
# matching subclasses are excluded from logging.
#ZED_SYSLOG_SUBCLASS_INCLUDE="checksum|scrub_*|vdev.*"
ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
##
# Use GUIDs instead of names when logging pool and vdevs
# Disabled by default, 1 to enable and 0 to disable.
#ZED_SYSLOG_DISPLAY_GUIDS=1
diff --git a/sys/contrib/openzfs/cmd/zed/zed_conf.c b/sys/contrib/openzfs/cmd/zed/zed_conf.c
index 2cf2311dbb42..59935102f123 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_conf.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_conf.c
@@ -1,705 +1,706 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/types.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "zed.h"
#include "zed_conf.h"
#include "zed_file.h"
#include "zed_log.h"
#include "zed_strings.h"
/*
* Initialise the configuration with default values.
*/
void
zed_conf_init(struct zed_conf *zcp)
{
memset(zcp, 0, sizeof (*zcp));
/* zcp->zfs_hdl opened in zed_event_init() */
/* zcp->zedlets created in zed_conf_scan_dir() */
zcp->pid_fd = -1; /* opened in zed_conf_write_pid() */
zcp->state_fd = -1; /* opened in zed_conf_open_state() */
zcp->zevent_fd = -1; /* opened in zed_event_init() */
zcp->max_jobs = 16;
if (!(zcp->pid_file = strdup(ZED_PID_FILE)) ||
!(zcp->zedlet_dir = strdup(ZED_ZEDLET_DIR)) ||
!(zcp->state_file = strdup(ZED_STATE_FILE)))
zed_log_die("Failed to create conf: %s", strerror(errno));
}
/*
* Destroy the configuration [zcp].
*
* Note: zfs_hdl & zevent_fd are destroyed via zed_event_fini().
*/
void
zed_conf_destroy(struct zed_conf *zcp)
{
if (zcp->state_fd >= 0) {
if (close(zcp->state_fd) < 0)
zed_log_msg(LOG_WARNING,
"Failed to close state file \"%s\": %s",
zcp->state_file, strerror(errno));
zcp->state_fd = -1;
}
if (zcp->pid_file) {
if ((unlink(zcp->pid_file) < 0) && (errno != ENOENT))
zed_log_msg(LOG_WARNING,
"Failed to remove PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
}
if (zcp->pid_fd >= 0) {
if (close(zcp->pid_fd) < 0)
zed_log_msg(LOG_WARNING,
"Failed to close PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
zcp->pid_fd = -1;
}
if (zcp->pid_file) {
free(zcp->pid_file);
zcp->pid_file = NULL;
}
if (zcp->zedlet_dir) {
free(zcp->zedlet_dir);
zcp->zedlet_dir = NULL;
}
if (zcp->state_file) {
free(zcp->state_file);
zcp->state_file = NULL;
}
if (zcp->zedlets) {
zed_strings_destroy(zcp->zedlets);
zcp->zedlets = NULL;
}
}
/*
* Display command-line help and exit.
*
* If [got_err] is 0, output to stdout and exit normally;
* otherwise, output to stderr and exit with a failure status.
*/
static void
_zed_conf_display_help(const char *prog, boolean_t got_err)
{
struct opt { const char *o, *d, *v; };
FILE *fp = got_err ? stderr : stdout;
struct opt *oo;
struct opt iopts[] = {
{ .o = "-h", .d = "Display help" },
{ .o = "-L", .d = "Display license information" },
{ .o = "-V", .d = "Display version information" },
{},
};
struct opt nopts[] = {
{ .o = "-v", .d = "Be verbose" },
{ .o = "-f", .d = "Force daemon to run" },
{ .o = "-F", .d = "Run daemon in the foreground" },
{ .o = "-I",
.d = "Idle daemon until kernel module is (re)loaded" },
{ .o = "-M", .d = "Lock all pages in memory" },
{ .o = "-P", .d = "$PATH for ZED to use (only used by ZTS)" },
{ .o = "-Z", .d = "Zero state file" },
{},
};
struct opt vopts[] = {
{ .o = "-d DIR", .d = "Read enabled ZEDLETs from DIR.",
.v = ZED_ZEDLET_DIR },
{ .o = "-p FILE", .d = "Write daemon's PID to FILE.",
.v = ZED_PID_FILE },
{ .o = "-s FILE", .d = "Write daemon's state to FILE.",
.v = ZED_STATE_FILE },
{ .o = "-j JOBS", .d = "Start at most JOBS at once.",
.v = "16" },
{},
};
fprintf(fp, "Usage: %s [OPTION]...\n", (prog ? prog : "zed"));
fprintf(fp, "\n");
for (oo = iopts; oo->o; ++oo)
fprintf(fp, " %*s %s\n", -8, oo->o, oo->d);
fprintf(fp, "\n");
for (oo = nopts; oo->o; ++oo)
fprintf(fp, " %*s %s\n", -8, oo->o, oo->d);
fprintf(fp, "\n");
for (oo = vopts; oo->o; ++oo)
fprintf(fp, " %*s %s [%s]\n", -8, oo->o, oo->d, oo->v);
fprintf(fp, "\n");
exit(got_err ? EXIT_FAILURE : EXIT_SUCCESS);
}
/*
* Display license information to stdout and exit.
*/
static void
_zed_conf_display_license(void)
{
printf(
"The ZFS Event Daemon (ZED) is distributed under the terms of the\n"
" Common Development and Distribution License (CDDL-1.0)\n"
" <http://opensource.org/licenses/CDDL-1.0>.\n"
"\n"
"Developed at Lawrence Livermore National Laboratory"
" (LLNL-CODE-403049).\n"
"\n");
exit(EXIT_SUCCESS);
}
/*
* Display version information to stdout and exit.
*/
static void
_zed_conf_display_version(void)
{
printf("%s-%s-%s\n",
ZFS_META_NAME, ZFS_META_VERSION, ZFS_META_RELEASE);
exit(EXIT_SUCCESS);
}
/*
* Copy the [path] string to the [resultp] ptr.
* If [path] is not an absolute path, prefix it with the current working dir.
* If [resultp] is non-null, free its existing string before assignment.
*/
static void
_zed_conf_parse_path(char **resultp, const char *path)
{
char buf[PATH_MAX];
assert(resultp != NULL);
assert(path != NULL);
if (*resultp)
free(*resultp);
if (path[0] == '/') {
*resultp = strdup(path);
} else {
if (!getcwd(buf, sizeof (buf)))
zed_log_die("Failed to get current working dir: %s",
strerror(errno));
if (strlcat(buf, "/", sizeof (buf)) >= sizeof (buf) ||
strlcat(buf, path, sizeof (buf)) >= sizeof (buf))
zed_log_die("Failed to copy path: %s",
strerror(ENAMETOOLONG));
*resultp = strdup(buf);
}
if (!*resultp)
zed_log_die("Failed to copy path: %s", strerror(ENOMEM));
}
/*
* Parse the command-line options into the configuration [zcp].
*/
void
zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
{
const char * const opts = ":hLVd:p:P:s:vfFMZIj:";
int opt;
unsigned long raw;
if (!zcp || !argv || !argv[0])
zed_log_die("Failed to parse options: Internal error");
opterr = 0; /* suppress default getopt err msgs */
while ((opt = getopt(argc, argv, opts)) != -1) {
switch (opt) {
case 'h':
_zed_conf_display_help(argv[0], B_FALSE);
break;
case 'L':
_zed_conf_display_license();
break;
case 'V':
_zed_conf_display_version();
break;
case 'd':
_zed_conf_parse_path(&zcp->zedlet_dir, optarg);
break;
case 'I':
zcp->do_idle = 1;
break;
case 'p':
_zed_conf_parse_path(&zcp->pid_file, optarg);
break;
case 'P':
_zed_conf_parse_path(&zcp->path, optarg);
break;
case 's':
_zed_conf_parse_path(&zcp->state_file, optarg);
break;
case 'v':
zcp->do_verbose = 1;
break;
case 'f':
zcp->do_force = 1;
break;
case 'F':
zcp->do_foreground = 1;
break;
case 'M':
zcp->do_memlock = 1;
break;
case 'Z':
zcp->do_zero = 1;
break;
case 'j':
errno = 0;
raw = strtoul(optarg, NULL, 0);
if (errno == ERANGE || raw > INT16_MAX) {
zed_log_die("%lu is too many jobs", raw);
} else if (raw == 0) {
zed_log_die("0 jobs makes no sense");
} else {
zcp->max_jobs = raw;
}
break;
case '?':
default:
if (optopt == '?')
_zed_conf_display_help(argv[0], B_FALSE);
fprintf(stderr, "%s: Invalid option '-%c'\n\n",
argv[0], optopt);
_zed_conf_display_help(argv[0], B_TRUE);
break;
}
}
}
/*
* Scan the [zcp] zedlet_dir for files to exec based on the event class.
* Files must be executable by user, but not writable by group or other.
* Dotfiles are ignored.
*
* Return 0 on success with an updated set of zedlets,
* or -1 on error with errno set.
*/
int
zed_conf_scan_dir(struct zed_conf *zcp)
{
zed_strings_t *zedlets;
DIR *dirp;
struct dirent *direntp;
char pathname[PATH_MAX];
struct stat st;
int n;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to scan zedlet dir: %s",
strerror(errno));
return (-1);
}
zedlets = zed_strings_create();
if (!zedlets) {
errno = ENOMEM;
zed_log_msg(LOG_WARNING, "Failed to scan dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
return (-1);
}
dirp = opendir(zcp->zedlet_dir);
if (!dirp) {
int errno_bak = errno;
zed_log_msg(LOG_WARNING, "Failed to open dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
zed_strings_destroy(zedlets);
errno = errno_bak;
return (-1);
}
while ((direntp = readdir(dirp))) {
if (direntp->d_name[0] == '.')
continue;
n = snprintf(pathname, sizeof (pathname),
"%s/%s", zcp->zedlet_dir, direntp->d_name);
if ((n < 0) || (n >= sizeof (pathname))) {
zed_log_msg(LOG_WARNING, "Failed to stat \"%s\": %s",
direntp->d_name, strerror(ENAMETOOLONG));
continue;
}
if (stat(pathname, &st) < 0) {
zed_log_msg(LOG_WARNING, "Failed to stat \"%s\": %s",
pathname, strerror(errno));
continue;
}
if (!S_ISREG(st.st_mode)) {
zed_log_msg(LOG_INFO,
"Ignoring \"%s\": not a regular file",
direntp->d_name);
continue;
}
if ((st.st_uid != 0) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": not owned by root",
direntp->d_name);
continue;
}
if (!(st.st_mode & S_IXUSR)) {
zed_log_msg(LOG_INFO,
"Ignoring \"%s\": not executable by user",
direntp->d_name);
continue;
}
if ((st.st_mode & S_IWGRP) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": writable by group",
direntp->d_name);
continue;
}
if ((st.st_mode & S_IWOTH) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": writable by other",
direntp->d_name);
continue;
}
if (zed_strings_add(zedlets, NULL, direntp->d_name) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to register \"%s\": %s",
direntp->d_name, strerror(errno));
continue;
}
if (zcp->do_verbose)
zed_log_msg(LOG_INFO,
"Registered zedlet \"%s\"", direntp->d_name);
}
if (closedir(dirp) < 0) {
int errno_bak = errno;
zed_log_msg(LOG_WARNING, "Failed to close dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
zed_strings_destroy(zedlets);
errno = errno_bak;
return (-1);
}
if (zcp->zedlets)
zed_strings_destroy(zcp->zedlets);
zcp->zedlets = zedlets;
return (0);
}
/*
* Write the PID file specified in [zcp].
* Return 0 on success, -1 on error.
*
* This must be called after fork()ing to become a daemon (so the correct PID
* is recorded), but before daemonization is complete and the parent process
* exits (for synchronization with systemd).
*/
int
zed_conf_write_pid(struct zed_conf *zcp)
{
char buf[PATH_MAX];
int n;
char *p;
mode_t mask;
int rv;
if (!zcp || !zcp->pid_file) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to create PID file: %s",
strerror(errno));
return (-1);
}
assert(zcp->pid_fd == -1);
/*
* Create PID file directory if needed.
*/
n = strlcpy(buf, zcp->pid_file, sizeof (buf));
if (n >= sizeof (buf)) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_ERR, "Failed to create PID file: %s",
strerror(errno));
goto err;
}
p = strrchr(buf, '/');
if (p)
*p = '\0';
if ((mkdirp(buf, 0755) < 0) && (errno != EEXIST)) {
zed_log_msg(LOG_ERR, "Failed to create directory \"%s\": %s",
buf, strerror(errno));
goto err;
}
/*
* Obtain PID file lock.
*/
mask = umask(0);
umask(mask | 022);
zcp->pid_fd = open(zcp->pid_file, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
umask(mask);
if (zcp->pid_fd < 0) {
zed_log_msg(LOG_ERR, "Failed to open PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
goto err;
}
rv = zed_file_lock(zcp->pid_fd);
if (rv < 0) {
zed_log_msg(LOG_ERR, "Failed to lock PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
goto err;
} else if (rv > 0) {
pid_t pid = zed_file_is_locked(zcp->pid_fd);
if (pid < 0) {
zed_log_msg(LOG_ERR,
"Failed to test lock on PID file \"%s\"",
zcp->pid_file);
} else if (pid > 0) {
zed_log_msg(LOG_ERR,
"Found PID %d bound to PID file \"%s\"",
pid, zcp->pid_file);
} else {
zed_log_msg(LOG_ERR,
"Inconsistent lock state on PID file \"%s\"",
zcp->pid_file);
}
goto err;
}
/*
* Write PID file.
*/
n = snprintf(buf, sizeof (buf), "%d\n", (int)getpid());
if ((n < 0) || (n >= sizeof (buf))) {
errno = ERANGE;
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else if (write(zcp->pid_fd, buf, n) != n) {
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else if (fdatasync(zcp->pid_fd) < 0) {
zed_log_msg(LOG_ERR, "Failed to sync PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else {
return (0);
}
err:
if (zcp->pid_fd >= 0) {
(void) close(zcp->pid_fd);
zcp->pid_fd = -1;
}
return (-1);
}
/*
* Open and lock the [zcp] state_file.
* Return 0 on success, -1 on error.
*
* FIXME: Move state information into kernel.
*/
int
zed_conf_open_state(struct zed_conf *zcp)
{
char dirbuf[PATH_MAX];
int n;
char *p;
int rv;
if (!zcp || !zcp->state_file) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to open state file: %s",
strerror(errno));
return (-1);
}
n = strlcpy(dirbuf, zcp->state_file, sizeof (dirbuf));
if (n >= sizeof (dirbuf)) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_WARNING, "Failed to open state file: %s",
strerror(errno));
return (-1);
}
p = strrchr(dirbuf, '/');
if (p)
*p = '\0';
if ((mkdirp(dirbuf, 0755) < 0) && (errno != EEXIST)) {
zed_log_msg(LOG_WARNING,
"Failed to create directory \"%s\": %s",
dirbuf, strerror(errno));
return (-1);
}
if (zcp->state_fd >= 0) {
if (close(zcp->state_fd) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to close state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
}
if (zcp->do_zero)
(void) unlink(zcp->state_file);
zcp->state_fd = open(zcp->state_file,
O_RDWR | O_CREAT | O_CLOEXEC, 0644);
if (zcp->state_fd < 0) {
zed_log_msg(LOG_WARNING, "Failed to open state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
rv = zed_file_lock(zcp->state_fd);
if (rv < 0) {
zed_log_msg(LOG_WARNING, "Failed to lock state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
if (rv > 0) {
pid_t pid = zed_file_is_locked(zcp->state_fd);
if (pid < 0) {
zed_log_msg(LOG_WARNING,
"Failed to test lock on state file \"%s\"",
zcp->state_file);
} else if (pid > 0) {
zed_log_msg(LOG_WARNING,
"Found PID %d bound to state file \"%s\"",
pid, zcp->state_file);
} else {
zed_log_msg(LOG_WARNING,
"Inconsistent lock state on state file \"%s\"",
zcp->state_file);
}
return (-1);
}
return (0);
}
/*
* Read the opened [zcp] state_file to obtain the eid & etime of the last event
* processed. Write the state from the last event to the [eidp] & [etime] args
* passed by reference. Note that etime[] is an array of size 2.
* Return 0 on success, -1 on error.
*/
int
zed_conf_read_state(struct zed_conf *zcp, uint64_t *eidp, int64_t etime[])
{
ssize_t len;
struct iovec iov[3];
ssize_t n;
if (!zcp || !eidp || !etime) {
errno = EINVAL;
zed_log_msg(LOG_ERR,
"Failed to read state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
return (-1);
}
len = 0;
iov[0].iov_base = eidp;
len += iov[0].iov_len = sizeof (*eidp);
iov[1].iov_base = &etime[0];
len += iov[1].iov_len = sizeof (etime[0]);
iov[2].iov_base = &etime[1];
len += iov[2].iov_len = sizeof (etime[1]);
n = readv(zcp->state_fd, iov, 3);
if (n == 0) {
*eidp = 0;
} else if (n < 0) {
zed_log_msg(LOG_WARNING,
"Failed to read state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
} else if (n != len) {
errno = EIO;
zed_log_msg(LOG_WARNING,
"Failed to read state file \"%s\": Read %d of %d bytes",
zcp->state_file, n, len);
return (-1);
}
return (0);
}
/*
* Write the [eid] & [etime] of the last processed event to the opened
* [zcp] state_file. Note that etime[] is an array of size 2.
* Return 0 on success, -1 on error.
*/
int
zed_conf_write_state(struct zed_conf *zcp, uint64_t eid, int64_t etime[])
{
ssize_t len;
struct iovec iov[3];
ssize_t n;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR,
"Failed to write state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
return (-1);
}
len = 0;
iov[0].iov_base = &eid;
len += iov[0].iov_len = sizeof (eid);
iov[1].iov_base = &etime[0];
len += iov[1].iov_len = sizeof (etime[0]);
iov[2].iov_base = &etime[1];
len += iov[2].iov_len = sizeof (etime[1]);
n = writev(zcp->state_fd, iov, 3);
if (n < 0) {
zed_log_msg(LOG_WARNING,
"Failed to write state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
if (n != len) {
errno = EIO;
zed_log_msg(LOG_WARNING,
"Failed to write state file \"%s\": Wrote %d of %d bytes",
zcp->state_file, n, len);
return (-1);
}
if (fdatasync(zcp->state_fd) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to sync state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
return (0);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed_exec.c b/sys/contrib/openzfs/cmd/zed/zed_exec.c
index 1eecfa0a92c4..03dcd03aceb7 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_exec.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_exec.c
@@ -1,368 +1,370 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <sys/avl.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
+#include <signal.h>
+
#include "zed_exec.h"
#include "zed_log.h"
#include "zed_strings.h"
#define ZEVENT_FILENO 3
struct launched_process_node {
avl_node_t node;
pid_t pid;
uint64_t eid;
char *name;
};
static int
_launched_process_node_compare(const void *x1, const void *x2)
{
pid_t p1;
pid_t p2;
assert(x1 != NULL);
assert(x2 != NULL);
p1 = ((const struct launched_process_node *) x1)->pid;
p2 = ((const struct launched_process_node *) x2)->pid;
if (p1 < p2)
return (-1);
else if (p1 == p2)
return (0);
else
return (1);
}
static pthread_t _reap_children_tid = (pthread_t)-1;
static volatile boolean_t _reap_children_stop;
static avl_tree_t _launched_processes;
static pthread_mutex_t _launched_processes_lock = PTHREAD_MUTEX_INITIALIZER;
static int16_t _launched_processes_limit;
/*
* Create an environment string array for passing to execve() using the
* NAME=VALUE strings in container [zsp].
* Return a newly-allocated environment, or NULL on error.
*/
static char **
_zed_exec_create_env(zed_strings_t *zsp)
{
int num_ptrs;
int buflen;
char *buf;
char **pp;
char *p;
const char *q;
int i;
int len;
num_ptrs = zed_strings_count(zsp) + 1;
buflen = num_ptrs * sizeof (char *);
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp))
buflen += strlen(q) + 1;
buf = calloc(1, buflen);
if (!buf)
return (NULL);
pp = (char **)buf;
p = buf + (num_ptrs * sizeof (char *));
i = 0;
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp)) {
pp[i] = p;
len = strlen(q) + 1;
memcpy(p, q, len);
p += len;
i++;
}
pp[i] = NULL;
assert(buf + buflen == p);
return ((char **)buf);
}
/*
* Fork a child process to handle event [eid]. The program [prog]
* in directory [dir] is executed with the environment [env].
*
* The file descriptor [zfd] is the zevent_fd used to track the
* current cursor location within the zevent nvlist.
*/
static void
_zed_exec_fork_child(uint64_t eid, const char *dir, const char *prog,
char *env[], int zfd, boolean_t in_foreground)
{
char path[PATH_MAX];
int n;
pid_t pid;
int fd;
struct launched_process_node *node;
sigset_t mask;
struct timespec launch_timeout =
{ .tv_sec = 0, .tv_nsec = 200 * 1000 * 1000, };
assert(dir != NULL);
assert(prog != NULL);
assert(env != NULL);
assert(zfd >= 0);
while (__atomic_load_n(&_launched_processes_limit,
__ATOMIC_SEQ_CST) <= 0)
(void) nanosleep(&launch_timeout, NULL);
n = snprintf(path, sizeof (path), "%s/%s", dir, prog);
if ((n < 0) || (n >= sizeof (path))) {
zed_log_msg(LOG_WARNING,
"Failed to fork \"%s\" for eid=%llu: %s",
prog, eid, strerror(ENAMETOOLONG));
return;
}
(void) pthread_mutex_lock(&_launched_processes_lock);
pid = fork();
if (pid < 0) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
zed_log_msg(LOG_WARNING,
"Failed to fork \"%s\" for eid=%llu: %s",
prog, eid, strerror(errno));
return;
} else if (pid == 0) {
(void) sigemptyset(&mask);
(void) sigprocmask(SIG_SETMASK, &mask, NULL);
(void) umask(022);
if (in_foreground && /* we're already devnulled if daemonised */
(fd = open("/dev/null", O_RDWR | O_CLOEXEC)) != -1) {
(void) dup2(fd, STDIN_FILENO);
(void) dup2(fd, STDOUT_FILENO);
(void) dup2(fd, STDERR_FILENO);
}
(void) dup2(zfd, ZEVENT_FILENO);
execle(path, prog, NULL, env);
_exit(127);
}
/* parent process */
node = calloc(1, sizeof (*node));
if (node) {
node->pid = pid;
node->eid = eid;
node->name = strdup(prog);
avl_add(&_launched_processes, node);
}
(void) pthread_mutex_unlock(&_launched_processes_lock);
__atomic_sub_fetch(&_launched_processes_limit, 1, __ATOMIC_SEQ_CST);
zed_log_msg(LOG_INFO, "Invoking \"%s\" eid=%llu pid=%d",
prog, eid, pid);
}
static void
_nop(int sig)
{}
static void *
_reap_children(void *arg)
{
struct launched_process_node node, *pnode;
pid_t pid;
int status;
struct rusage usage;
struct sigaction sa = {};
(void) sigfillset(&sa.sa_mask);
(void) sigdelset(&sa.sa_mask, SIGCHLD);
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
(void) sigemptyset(&sa.sa_mask);
sa.sa_handler = _nop;
sa.sa_flags = SA_NOCLDSTOP;
(void) sigaction(SIGCHLD, &sa, NULL);
for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
(void) pthread_mutex_lock(&_launched_processes_lock);
pid = wait4(0, &status, WNOHANG, &usage);
if (pid == 0 || pid == (pid_t)-1) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
if (pid == 0 || errno == ECHILD)
pause();
else if (errno != EINTR)
zed_log_msg(LOG_WARNING,
"Failed to wait for children: %s",
strerror(errno));
} else {
memset(&node, 0, sizeof (node));
node.pid = pid;
pnode = avl_find(&_launched_processes, &node, NULL);
if (pnode) {
memcpy(&node, pnode, sizeof (node));
avl_remove(&_launched_processes, pnode);
free(pnode);
}
(void) pthread_mutex_unlock(&_launched_processes_lock);
__atomic_add_fetch(&_launched_processes_limit, 1,
__ATOMIC_SEQ_CST);
usage.ru_utime.tv_sec += usage.ru_stime.tv_sec;
usage.ru_utime.tv_usec += usage.ru_stime.tv_usec;
usage.ru_utime.tv_sec +=
usage.ru_utime.tv_usec / (1000 * 1000);
usage.ru_utime.tv_usec %= 1000 * 1000;
if (WIFEXITED(status)) {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us exit=%d",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us sig=%d/%s",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
WTERMSIG(status),
strsignal(WTERMSIG(status)));
} else {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us status=0x%X",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
(unsigned int) status);
}
free(node.name);
}
}
return (NULL);
}
void
zed_exec_fini(void)
{
struct launched_process_node *node;
void *ck = NULL;
if (_reap_children_tid == (pthread_t)-1)
return;
_reap_children_stop = B_TRUE;
(void) pthread_kill(_reap_children_tid, SIGCHLD);
(void) pthread_join(_reap_children_tid, NULL);
while ((node = avl_destroy_nodes(&_launched_processes, &ck)) != NULL) {
free(node->name);
free(node);
}
avl_destroy(&_launched_processes);
(void) pthread_mutex_destroy(&_launched_processes_lock);
(void) pthread_mutex_init(&_launched_processes_lock, NULL);
_reap_children_tid = (pthread_t)-1;
}
/*
* Process the event [eid] by synchronously invoking all zedlets with a
* matching class prefix.
*
* Each executable in [zcp->zedlets] from the directory [zcp->zedlet_dir]
* is matched against the event's [class], [subclass], and the "all" class
* (which matches all events).
* Every zedlet with a matching class prefix is invoked.
* The NAME=VALUE strings in [envs] will be passed to the zedlet as
* environment variables.
*
* The file descriptor [zcp->zevent_fd] is the zevent_fd used to track the
* current cursor location within the zevent nvlist.
*
* Return 0 on success, -1 on error.
*/
int
zed_exec_process(uint64_t eid, const char *class, const char *subclass,
struct zed_conf *zcp, zed_strings_t *envs)
{
const char *class_strings[4];
const char *allclass = "all";
const char **csp;
const char *z;
char **e;
int n;
if (!zcp->zedlet_dir || !zcp->zedlets || !envs || zcp->zevent_fd < 0)
return (-1);
if (_reap_children_tid == (pthread_t)-1) {
_launched_processes_limit = zcp->max_jobs;
if (pthread_create(&_reap_children_tid, NULL,
_reap_children, NULL) != 0)
return (-1);
pthread_setname_np(_reap_children_tid, "reap ZEDLETs");
avl_create(&_launched_processes, _launched_process_node_compare,
sizeof (struct launched_process_node),
offsetof(struct launched_process_node, node));
}
csp = class_strings;
if (class)
*csp++ = class;
if (subclass)
*csp++ = subclass;
if (allclass)
*csp++ = allclass;
*csp = NULL;
e = _zed_exec_create_env(envs);
for (z = zed_strings_first(zcp->zedlets); z;
z = zed_strings_next(zcp->zedlets)) {
for (csp = class_strings; *csp; csp++) {
n = strlen(*csp);
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
_zed_exec_fork_child(eid, zcp->zedlet_dir,
z, e, zcp->zevent_fd, zcp->do_foreground);
}
}
free(e);
return (0);
}
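A note on the class-prefix test in zed_exec_process() above: a zedlet runs when its file name starts with the event class, the subclass, or "all", and the character immediately after that prefix is not a letter. A minimal shell sketch of the same rule, using made-up zedlet file names:
# Sketch of the strncmp()/isalpha() prefix test, for illustration only.
# The zedlet names below are hypothetical examples.
matches_class() {
    zedlet=$1 class=$2
    rest=${zedlet#"$class"}                # strip the candidate prefix
    [ "$rest" = "$zedlet" ] && return 1    # prefix did not match at all
    case "$rest" in
        [A-Za-z]*) return 1 ;;             # next character is a letter: reject
        *)         return 0 ;;             # '-', '.', digit, or end: accept
    esac
}
matches_class "all-syslog.sh" "all"              && echo "runs for every event"
matches_class "statechange-led.sh" "statechange" && echo "runs for statechange"
matches_class "statechange-led.sh" "state"       || echo "bare prefix 'state' does not match"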
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/dm-deps b/sys/contrib/openzfs/cmd/zpool/zpool.d/dm-deps
index ee39514e4d92..42af6a8d63cd 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/dm-deps
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/dm-deps
@@ -1,29 +1,27 @@
#!/bin/sh
#
# Show device mapper dependent / underlying devices. This is useful for
# looking up the /dev/sd* devices associated with a dm or multipath device.
#
if [ "$1" = "-h" ] ; then
echo "Show device mapper dependent (underlying) devices."
exit
fi
dev="$VDEV_PATH"
# If the VDEV path is a symlink, resolve it to a real device
if [ -L "$dev" ] ; then
dev=$(readlink "$dev")
fi
-dev=$(basename "$dev")
+dev="${dev##*/}"
val=""
if [ -d "/sys/class/block/$dev/slaves" ] ; then
- # ls -C: output in columns, no newlines
- val=$(ls -C "/sys/class/block/$dev/slaves")
-
- # ls -C will print two spaces between files; change to one space.
- val=$(echo "$val" | sed -r 's/[[:blank:]]+/ /g')
+ # ls -C: output in columns, no newlines, two spaces (change to one)
+ # shellcheck disable=SC2012
+ val=$(ls -C "/sys/class/block/$dev/slaves" | tr -s '[:space:]' ' ')
fi
echo "dm-deps=$val"
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/iostat b/sys/contrib/openzfs/cmd/zpool/zpool.d/iostat
index 41a3acfae7a4..19be475e9b27 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/iostat
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/iostat
@@ -1,77 +1,77 @@
#!/bin/sh
#
# Display most relevant iostat bandwidth/latency numbers. The output is
# dependent on the name of the script/symlink used to call it.
#
helpstr="
iostat: Show iostat values since boot (summary page).
iostat-1s: Do a single 1-second iostat sample and show values.
iostat-10s: Do a single 10-second iostat sample and show values."
-script=$(basename "$0")
+script="${0##*/}"
if [ "$1" = "-h" ] ; then
echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2-
exit
fi
if [ "$script" = "iostat-1s" ] ; then
# Do a single one-second sample
interval=1
# Don't show summary stats
brief="yes"
elif [ "$script" = "iostat-10s" ] ; then
# Do a single ten-second sample
interval=10
# Don't show summary stats
brief="yes"
fi
if [ -f "$VDEV_UPATH" ] ; then
# We're a file-based vdev, iostat doesn't work on us. Do nothing.
exit
fi
if [ "$(uname)" = "FreeBSD" ]; then
out=$(iostat -dKx \
${interval:+"-w $interval"} \
${interval:+"-c 1"} \
"$VDEV_UPATH" | tail -n 2)
else
out=$(iostat -kx \
${brief:+"-y"} \
${interval:+"$interval"} \
${interval:+"1"} \
- "$VDEV_UPATH" | awk NF | tail -n 2)
+ "$VDEV_UPATH" | grep -v '^$' | tail -n 2)
fi
# Sample output (we want the last two lines):
#
# Linux 2.6.32-642.13.1.el6.x86_64 (centos68) 03/09/2017 _x86_64_ (6 CPU)
#
# avg-cpu: %user %nice %system %iowait %steal %idle
# 0.00 0.00 0.00 0.00 0.00 100.00
#
# Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util
# sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
#
# Get the column names
cols=$(echo "$out" | head -n 1)
# Get the values and tab separate them to make them cut-able.
-vals=$(echo "$out" | tail -n 1 | sed -r 's/[[:blank:]]+/\t/g')
+vals=$(echo "$out" | tail -n 1 | tr -s '[:space:]' '\t')
i=0
for col in $cols ; do
i=$((i+1))
# Skip the first column since it's just the device name
if [ $i -eq 1 ]; then
continue
fi
# Get i'th value
val=$(echo "$vals" | cut -f "$i")
echo "$col=$val"
done
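To see what the tr/cut pairing above yields, here is a detached run on canned text; the header and value rows are invented, not real iostat output:
# Sketch: pair header names with values the same way the script does.
cols="Device r/s w/s rkB/s"
out="sdb 1.00 2.00 3.00"
vals=$(echo "$out" | tr -s '[:space:]' '\t')   # tab-separate for cut(1)
i=0
for col in $cols ; do
    i=$((i+1))
    [ "$i" -eq 1 ] && continue                 # skip the device-name column
    echo "$col=$(echo "$vals" | cut -f "$i")"  # prints r/s=1.00, w/s=2.00, ...
done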
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/lsblk b/sys/contrib/openzfs/cmd/zpool/zpool.d/lsblk
index 1cdef40494fe..919783a1c1bf 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/lsblk
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/lsblk
@@ -1,83 +1,83 @@
#!/bin/sh
#
# Print some common lsblk values
#
# Any (lowercased) name symlinked to the lsblk script will be passed to lsblk
# as one of its --output names. Here's a partial list of --output names
# from the lsblk binary:
#
# Available columns (for --output):
# NAME device name
# KNAME internal kernel device name
# MAJ:MIN major:minor device number
# FSTYPE filesystem type
# MOUNTPOINT where the device is mounted
# LABEL filesystem LABEL
# UUID filesystem UUID
# RA read-ahead of the device
# RO read-only device
# RM removable device
# MODEL device identifier
# SIZE size of the device
# STATE state of the device
# OWNER user name
# GROUP group name
# MODE device node permissions
# ALIGNMENT alignment offset
# MIN-IO minimum I/O size
# OPT-IO optimal I/O size
# PHY-SEC physical sector size
# LOG-SEC logical sector size
# ROTA rotational device
# SCHED I/O scheduler name
# RQ-SIZE request queue size
# TYPE device type
# DISC-ALN discard alignment offset
# DISC-GRAN discard granularity
# DISC-MAX discard max bytes
# DISC-ZERO discard zeroes data
#
# If the script is run as just 'lsblk' then print out disk size, vendor,
# and model number.
helpstr="
label: Show filesystem label.
model: Show disk model number.
size: Show the disk capacity.
vendor: Show the disk vendor.
lsblk: Show the disk size, vendor, and model number."
-script=$(basename "$0")
+script="${0##*/}"
if [ "$1" = "-h" ] ; then
echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2-
exit
fi
if [ "$script" = "lsblk" ] ; then
list="size vendor model"
else
list=$(echo "$script" | tr '[:upper:]' '[:lower:]')
fi
# Older versions of lsblk don't support all these values (like SERIAL).
for i in $list ; do
# Special case: Looking up the size of a file-based vdev can't
# be done with lsblk.
if [ "$i" = "size" ] && [ -f "$VDEV_UPATH" ] ; then
size=$(du -h --apparent-size "$VDEV_UPATH" | cut -f 1)
echo "size=$size"
continue
fi
val=""
if val=$(eval "lsblk -dl -n -o $i $VDEV_UPATH 2>/dev/null") ; then
# Remove leading/trailing whitespace from value
val=$(echo "$val" | sed -e 's/^[[:space:]]*//' \
-e 's/[[:space:]]*$//')
fi
echo "$i=$val"
done
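The symlink dispatch above can be exercised by hand; the install path and device below are assumptions, not taken from this diff:
# Sketch: a symlink named "vendor" makes the script emit only that column.
# /etc/zfs/zpool.d is assumed to be the installed zpool.d directory.
ln -s /etc/zfs/zpool.d/lsblk /etc/zfs/zpool.d/vendor
VDEV_UPATH=/dev/sda /etc/zfs/zpool.d/vendor   # prints "vendor=..."
VDEV_UPATH=/dev/sda /etc/zfs/zpool.d/lsblk    # prints size=, vendor=, model=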
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/media b/sys/contrib/openzfs/cmd/zpool/zpool.d/media
index 5683cdc3c023..660f78b743fc 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/media
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/media
@@ -1,34 +1,31 @@
#!/bin/sh
#
# Print out the type of device
#
if [ "$1" = "-h" ] ; then
echo "Show whether a vdev is a file, hdd, ssd, or iscsi."
exit
fi
if [ -b "$VDEV_UPATH" ]; then
- device=$(basename "$VDEV_UPATH")
- val=$(cat "/sys/block/$device/queue/rotational" 2>/dev/null)
- if [ "$val" = "0" ]; then
- MEDIA="ssd"
- fi
-
- if [ "$val" = "1" ]; then
- MEDIA="hdd"
- fi
+ device="${VDEV_UPATH##*/}"
+ read -r val 2>/dev/null < "/sys/block/$device/queue/rotational"
+ case "$val" in
+ 0) MEDIA="ssd" ;;
+ 1) MEDIA="hdd" ;;
+ esac
vpd_pg83="/sys/block/$device/device/vpd_pg83"
if [ -f "$vpd_pg83" ]; then
if grep -q --binary "iqn." "$vpd_pg83"; then
MEDIA="iscsi"
fi
fi
else
if [ -f "$VDEV_UPATH" ]; then
MEDIA="file"
fi
fi
echo "media=$MEDIA"
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/ses b/sys/contrib/openzfs/cmd/zpool/zpool.d/ses
index b1836d676528..b51fe31894ab 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/ses
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/ses
@@ -1,58 +1,58 @@
#!/bin/sh
#
# Print SCSI Enclosure Services (SES) info. The output is dependent on the name
# of the script/symlink used to call it.
#
helpstr="
enc: Show disk enclosure w:x:y:z value.
slot: Show disk slot number as reported by the enclosure.
encdev: Show /dev/sg* device associated with the enclosure disk slot.
fault_led: Show value of the disk enclosure slot fault LED.
locate_led: Show value of the disk enclosure slot locate LED.
ses: Show disk's enc, enc device, slot, and fault/locate LED values."
-script=$(basename "$0")
+script="${0##*/}"
if [ "$1" = "-h" ] ; then
echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2-
exit
fi
if [ "$script" = "ses" ] ; then
scripts='enc encdev slot fault_led locate_led'
else
scripts="$script"
fi
for i in $scripts ; do
if [ -z "$VDEV_ENC_SYSFS_PATH" ] ; then
echo "$i="
continue
fi
val=""
case $i in
enc)
val=$(ls "$VDEV_ENC_SYSFS_PATH/../../" 2>/dev/null)
;;
slot)
val=$(cat "$VDEV_ENC_SYSFS_PATH/slot" 2>/dev/null)
;;
encdev)
val=$(ls "$VDEV_ENC_SYSFS_PATH/../device/scsi_generic" 2>/dev/null)
;;
fault_led)
# A JBOD's fault LED is called 'fault'; an NVMe fault LED is called
# 'attention'.
if [ -f "$VDEV_ENC_SYSFS_PATH/fault" ] ; then
val=$(cat "$VDEV_ENC_SYSFS_PATH/fault" 2>/dev/null)
elif [ -f "$VDEV_ENC_SYSFS_PATH/attention" ] ; then
val=$(cat "$VDEV_ENC_SYSFS_PATH/attention" 2>/dev/null)
fi
;;
locate_led)
val=$(cat "$VDEV_ENC_SYSFS_PATH/locate" 2>/dev/null)
;;
esac
echo "$i=$val"
done
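These zpool.d columns are normally consumed through the -c option of zpool status/iostat (see the usage strings further down in zpool_main.c); a sketch invocation with a placeholder pool name:
# Sketch: run the 'ses' and 'media' scripts for every vdev of pool "tank".
zpool status -c ses,media tank
zpool iostat -c slot,fault_led tank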
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool_main.c b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
index 9cf0163ab327..b93a6196beea 100644
--- a/sys/contrib/openzfs/cmd/zpool/zpool_main.c
+++ b/sys/contrib/openzfs/cmd/zpool/zpool_main.c
@@ -1,10784 +1,10784 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "statcommon.h"
libzfs_handle_t *g_zfs;
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_checkpoint(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
static zpool_compat_status_t zpool_do_load_compat(
const char *, boolean_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_CHECKPOINT,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_REPLACE,
HELP_REMOVE,
HELP_INITIALIZE,
HELP_SCRUB,
HELP_RESILVER,
HELP_TRIM,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostats"
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][13] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
NULL},
};
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just do a highbit64(a)
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "version", zpool_do_version, HELP_VERSION },
{ NULL },
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "checkpoint", zpool_do_checkpoint, HELP_CHECKPOINT },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "initialize", zpool_do_initialize, HELP_INITIALIZE },
{ "resilver", zpool_do_resilver, HELP_RESILVER },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ "trim", zpool_do_trim, HELP_TRIM },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
#define VDEV_ALLOC_CLASS_LOGS "logs"
static zpool_command_t *current_command;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-fsw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [-nF] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [[-n] interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_OFFLINE:
return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
case HELP_ONLINE:
return (gettext("\tonline [-e] <pool> <device> ...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-fsw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
"[<device> ...]\n"));
case HELP_STATUS:
return (gettext("\tstatus [-c [script1,script2,...]] "
"[-igLpPstvxD] [-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool> \n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
}
abort();
/* NOTREACHED */
}
static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
uint_t children = 0;
nvlist_t **child;
uint_t i;
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (children == 0) {
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
VDEV_NAME_PATH);
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
strcmp(path, VDEV_TYPE_HOLE) != 0)
fnvlist_add_boolean(res, path);
free(path);
return;
}
for (i = 0; i < children; i++) {
zpool_collect_leaves(zhp, child[i], res);
}
}
/*
* Callback routine that will print out a pool property value.
*/
static int
print_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp,
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_POOL);
(void) fprintf(fp, "\t%-19s ", "feature@...");
(void) fprintf(fp, "YES disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties must be "
"appended with a feature name.\nSee zpool-features(7).\n"));
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -w Wait. Blocks until initializing has completed.
*/
int
zpool_do_initialize(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_CANCEL;
break;
case 's':
if (cmd_type != POOL_INITIALIZE_START &&
cmd_type != POOL_INITIALIZE_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
} else {
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
zpool_close(zhp);
return (err);
}
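The flag handling above corresponds to invocations like the following; pool and device names are placeholders:
# Sketch of zpool initialize usage accepted by the parser above.
zpool initialize tank              # start on every leaf vdev of the pool
zpool initialize -w tank sda       # start on one vdev and block until done
zpool initialize -s tank           # suspend; restart later with no flags
zpool initialize -c tank           # cancel (-c and -s cannot be combined)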
/*
* print a pool vdev config for dry runs
*/
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
const char *match, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
boolean_t printed = B_FALSE;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0) {
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
return;
}
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
char *class = "";
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole == B_TRUE) {
continue;
}
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
class = VDEV_ALLOC_BIAS_LOG;
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
if (strcmp(match, class) != 0)
continue;
if (!printed && name != NULL) {
(void) printf("\t%*s%s\n", indent, "", name);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2, "",
name_flags);
free(vname);
}
}
/*
* Print the list of l2cache devices for dry runs.
*/
static void
print_cache_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "cache");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
/*
* Print the list of spares for dry runs.
*/
static void
print_spare_list(nvlist_t *nv, int indent)
{
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0 && children > 0) {
(void) printf("\t%*s%s\n", indent, "", "spares");
} else {
return;
}
for (c = 0; c < children; c++) {
char *vname;
vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
(void) printf("\t%*s%s\n", indent + 2, "", vname);
free(vname);
}
}
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
nvlist_t *proplist;
const char *normnm;
char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
const char *cname =
zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
!zpool_prop_feature(propname)) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
/*
* if version is specified, only "legacy" compatibility
* may be requested
*/
if ((prop == ZPOOL_PROP_COMPATIBILITY &&
strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
nvlist_exists(proplist, cname) &&
strcmp(fnvlist_lookup_string(proplist, cname),
ZPOOL_COMPAT_LEGACY) != 0)) {
(void) fprintf(stderr, gettext("when 'version' is "
"specified, the 'compatibility' feature may only "
"be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
return (2);
}
if (zpool_prop_feature(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
zfs_prop_t fsprop = zfs_name_to_prop(propname);
if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
B_FALSE)) {
normnm = zfs_prop_to_name(fsprop);
} else if (zfs_prop_user(propname) ||
zfs_prop_userquota(propname)) {
normnm = propname;
} else {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid filesystem property\n"), propname);
return (2);
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop)
{
char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
*
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by make_root_vdev(), which constructs the nvlist needed to pass to
* libzfs.
*/
int
zpool_do_add(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to make_root_vdev for processing */
nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child, **sparechild;
uint_t l2children, sparechildren, c;
char *vname;
boolean_t hadcache = B_FALSE, hadspare = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
name_flags | VDEV_NAME_TYPE_ID);
print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);
/* print other classes: 'dedup', 'special', and 'log' */
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", poolnvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
print_vdev_tree(zhp, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, name_flags);
}
if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", poolnvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
print_vdev_tree(zhp, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, name_flags);
}
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
/* And finally the spares */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
hadspare = B_TRUE;
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&sparechild, &sparechildren) == 0 && sparechildren > 0) {
if (!hadspare)
(void) printf(gettext("\tspares\n"));
for (c = 0; c < sparechildren; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
sparechild[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
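For reference, the option checks above admit invocations such as these; pool and device names are placeholders:
# Sketch of zpool add usage matching the parser above.
zpool add -n tank mirror sdc sdd   # dry run: print the resulting layout only
zpool add -o ashift=12 tank sde    # ashift is the only property -o accepts here
zpool add -f tank sdf              # force past the in-use checks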
/*
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev from the pool.
*/
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
boolean_t stop = B_FALSE;
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if (stop && noop) {
(void) fprintf(stderr, gettext("stop request ignored\n"));
return (0);
}
if (stop) {
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
usage(B_FALSE);
}
for (i = 1; i < argc; i++) {
if (noop) {
uint64_t size;
if (zpool_vdev_indirect_size(zhp, argv[i],
&size) != 0) {
ret = 1;
break;
}
if (parsable) {
(void) printf("%s %llu\n",
argv[i], (unsigned long long)size);
} else {
char valstr[32];
zfs_nicenum(size, valstr,
sizeof (valstr));
(void) printf("Memory that will be "
"used after removing %s: %s\n",
argv[i], valstr);
}
} else {
if (zpool_vdev_remove(zhp, argv[i]) != 0)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
return (ret);
}
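The -n/-p pair above is mainly for sizing the indirect mapping before committing to a removal; placeholder names again:
# Sketch of zpool remove usage matching the parser above.
zpool remove -n tank sdc     # report the memory the mapping would use
zpool remove -np tank sdc    # same, but parsable (raw byte count)
zpool remove -w tank sdc     # remove and wait for the evacuation to finish
zpool remove -s tank         # stop an in-progress removal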
/*
* Return 1 if a vdev is active (being used in a pool)
* Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
*
* This is useful for checking if a disk in an active pool is offlined or
* faulted.
*/
static int
vdev_is_active(char *vdev_path)
{
int fd;
fd = open(vdev_path, O_EXCL);
if (fd < 0) {
return (1); /* can't open O_EXCL - disk is active */
}
close(fd);
return (0); /* disk is inactive in the pool */
}
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for the vdevs which are members of
* the exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
struct stat st;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* Check if we were given an absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(vdev, argv[0], sizeof (vdev));
if (vdev[0] != '/' && stat(vdev, &st) != 0) {
int error;
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat(vdev, &st) != 0)) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try specifying absolute "
"path instead\n"), argv[0]);
return (1);
}
}
if ((fd = open(vdev, O_RDWR)) < 0) {
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
vdev, strerror(errno));
return (1);
}
/*
* Flush all dirty pages for the block device. This should not be
* fatal when the device does not support BLKFLSBUF as would be the
* case for a file vdev.
*/
if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0) {
(void) fprintf(stderr,
gettext("failed to read label from %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
/*
* We allow the user to call 'zpool offline -f'
* on an offlined disk in an active pool. We can check if
* the disk is online by calling vdev_is_active().
*/
if (force && !vdev_is_active(vdev))
break;
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\""),
vdev, zpool_pool_state_to_name(state), name);
if (force) {
(void) fprintf(stderr, gettext(
". Offline the disk first to clear its label."));
}
printf("\n");
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
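A short usage illustration of the checks above; the partition name is a placeholder:
# Sketch of zpool labelclear usage matching the state checks above.
zpool labelclear sdc1        # refused while the label says the device is in use
zpool labelclear -f sdc1     # override for exported/potentially-active pools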
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
* Once we get the nvlist back from make_root_vdev(), we either print out the
* contents (if '-n' was specified), or pass it to libzfs to do the creation.
*/
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_pool_features = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *compat = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_pool_features = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 10);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_pool_features = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
if (zpool_name_to_prop(optarg) ==
ZPOOL_PROP_COMPATIBILITY)
compat = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to make_root_vdev for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
* '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
int count = 0;
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message and run
* through all the vdevs in the list and print out in an
* appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
print_vdev_tree(NULL, "dedup", nvroot, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", nvroot, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
print_vdev_tree(NULL, "logs", nvroot, 0,
VDEV_ALLOC_BIAS_LOG, 0);
print_cache_list(nvroot, 0);
print_spare_list(nvroot, 0);
ret = 0;
} else {
/*
* Load in feature set.
* Note: if compatibility property not given, we'll have
* NULL, which means 'all features'.
*/
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
goto errout;
/*
* props contains list of features to enable.
* For each feature:
* - remove it if feature@name=disabled
* - leave it there if feature@name=enabled
* - add it if:
* - enable_pool_features (ie: no '-d' or '-o version')
* - it's supported by the kernel module
* - it's in the requested feature set
* - warn if it's enabled but not in compat
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval, ZFS_FEATURE_DISABLED) == 0)
(void) nvlist_remove_all(props,
propname);
if (strcmp(propval,
ZFS_FEATURE_ENABLED) == 0 &&
!requested_features[i])
(void) fprintf(stderr, gettext(
"Warning: feature \"%s\" enabled "
"but is not in specified "
"'compatibility' feature set.\n"),
feat->fi_uname);
} else if (
enable_pool_features &&
feat->fi_zfs_mod_supported &&
requested_features[i]) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0) {
ret = zfs_shareall(pool);
zfs_commit_all_shares();
}
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
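The option parsing above covers invocations such as the following; pool, device, and mountpoint names are placeholders:
# Sketch of zpool create usage matching the parser above.
zpool create -n tank mirror sda sdb                  # dry run, print layout only
zpool create -m /export/tank tank raidz sdc sdd sde  # explicit root mountpoint
zpool create -d -o feature@lz4_compress=enabled tank sdf    # opt-in features only
zpool create -o compatibility=legacy -o version=28 tank sdg # with 'version', compat may only be 'legacy'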
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the export */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
typedef struct export_cbdata {
boolean_t force;
boolean_t hardforce;
} export_cbdata_t;
/*
* Export one pool
*/
static int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
if (zpool_disable_datasets(zhp, cb->force) != 0)
return (1);
/* The history must be logged as part of the export */
log_history = B_FALSE;
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
argc -= optind;
argv += optind;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
B_FALSE, zpool_export_one, &cb));
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_export_one,
&cb);
return (ret);
}
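The export paths above correspond to invocations like these; the pool name is a placeholder:
# Sketch of zpool export usage matching the parser above.
zpool export tank       # cleanly unmount datasets, then export
zpool export -f tank    # force-unmount busy datasets first
zpool export -a         # export every imported pool (takes no pool arguments)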
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
char *name;
nvlist_t **child;
uint_t c, children;
int ret;
name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
if (strlen(name) + depth > max)
max = strlen(name) + depth;
free(name);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
return (max);
}
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_status;
boolean_t cb_print_slow_ios;
boolean_t cb_print_vdev_init;
boolean_t cb_print_vdev_trim;
vdev_cmd_data_list_t *vcdl;
} status_cbdata_t;
/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static int
is_blank_str(char *str)
{
while (str != NULL && *str != '\0') {
if (!isblank(*str))
return (0);
str++;
}
return (1);
}
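/*
 * For example, is_blank_str(NULL), is_blank_str(""), and is_blank_str(" \t")
 * all return 1, while is_blank_str(" x ") returns 0 (isblank() only matches
 * spaces and tabs).
 */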
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
{
vdev_cmd_data_t *data;
int i, j;
char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (val == NULL || is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
printf(" ");
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
printf(" ");
val = data->lines[j];
printf("%s", val ? val : "");
}
break;
}
}
/*
* Print vdev initialization status for leaves
*/
static void
print_status_initialize(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_initialize_action_time;
int initialize_pct = 100;
if (vs->vs_initialize_state !=
VDEV_INITIALIZE_COMPLETE) {
initialize_pct = (vs->vs_initialize_bytes_done *
100 / (vs->vs_initialize_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_initialize_state) {
case VDEV_INITIALIZE_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_INITIALIZE_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_INITIALIZE_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% initialized%s)"),
initialize_pct, zbuf);
} else {
(void) printf(gettext(" (uninitialized)"));
}
} else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) printf(gettext(" (initializing)"));
}
}
/*
* Print vdev TRIM status for leaves
*/
static void
print_status_trim(vdev_stat_t *vs, boolean_t verbose)
{
if (verbose) {
if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE ||
vs->vs_trim_state == VDEV_TRIM_SUSPENDED ||
vs->vs_trim_state == VDEV_TRIM_COMPLETE) &&
!vs->vs_scan_removing) {
char zbuf[1024];
char tbuf[256];
struct tm zaction_ts;
time_t t = vs->vs_trim_action_time;
int trim_pct = 100;
if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) {
trim_pct = (vs->vs_trim_bytes_done *
100 / (vs->vs_trim_bytes_est + 1));
}
(void) localtime_r(&t, &zaction_ts);
(void) strftime(tbuf, sizeof (tbuf), "%c", &zaction_ts);
switch (vs->vs_trim_state) {
case VDEV_TRIM_SUSPENDED:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("suspended, started at"), tbuf);
break;
case VDEV_TRIM_ACTIVE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("started at"), tbuf);
break;
case VDEV_TRIM_COMPLETE:
(void) snprintf(zbuf, sizeof (zbuf), ", %s %s",
gettext("completed at"), tbuf);
break;
}
(void) printf(gettext(" (%d%% trimmed%s)"),
trim_pct, zbuf);
} else if (vs->vs_trim_notsup) {
(void) printf(gettext(" (trim unsupported)"));
} else {
(void) printf(gettext(" (untrimmed)"));
}
} else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) {
(void) printf(gettext(" (trimming)"));
}
}
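/*
 * Illustrative leaf-vdev suffixes produced by the two helpers above when
 * verbose output is requested (timestamps are hypothetical):
 *
 *   (25% initialized, started at Tue Jan 11 08:05:02 2022)
 *   (100% trimmed, completed at Tue Jan 11 09:12:44 2022)
 *   (untrimmed)
 */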
/*
* Return the color associated with a health string. This includes returning
* NULL for no color change.
*/
static char *
health_str_to_color(const char *health)
{
if (strcmp(health, gettext("FAULTED")) == 0 ||
strcmp(health, gettext("SUSPENDED")) == 0 ||
strcmp(health, gettext("UNAVAIL")) == 0) {
return (ANSI_RED);
}
if (strcmp(health, gettext("OFFLINE")) == 0 ||
strcmp(health, gettext("DEGRADED")) == 0 ||
strcmp(health, gettext("REMOVED")) == 0) {
return (ANSI_YELLOW);
}
return (NULL);
}
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs)
{
nvlist_t **child, *root;
uint_t c, i, vsc, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
char *type;
char *path = NULL;
char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
return;
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
 * For hot spares, we use the terms 'INUSE' and 'AVAIL' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = gettext("INUSE");
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = gettext("AVAIL");
}
printf_color(health_str_to_color(state),
"\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
if (vs->vs_read_errors)
rcolor = ANSI_RED;
if (vs->vs_write_errors)
wcolor = ANSI_RED;
if (vs->vs_checksum_errors)
ccolor = ANSI_RED;
if (cb->cb_literal) {
printf(" ");
printf_color(rcolor, "%5llu",
(u_longlong_t)vs->vs_read_errors);
printf(" ");
printf_color(wcolor, "%5llu",
(u_longlong_t)vs->vs_write_errors);
printf(" ");
printf_color(ccolor, "%5llu",
(u_longlong_t)vs->vs_checksum_errors);
} else {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf,
sizeof (cbuf));
printf(" ");
printf_color(rcolor, "%5s", rbuf);
printf(" ");
printf_color(wcolor, "%5s", wbuf);
printf(" ");
printf_color(ccolor, "%5s", cbuf);
}
if (cb->cb_print_slow_ios) {
if (children == 0) {
/* Only leaf vdevs have slow I/Os */
zfs_nicenum(vs->vs_slow_ios, rbuf,
sizeof (rbuf));
} else {
snprintf(rbuf, sizeof (rbuf), "-");
}
if (cb->cb_literal)
printf(" %5llu", (u_longlong_t)vs->vs_slow_ios);
else
printf(" %5s", rbuf);
}
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" %s %s", gettext("was"), path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
color_start(ANSI_RED);
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ASHIFT_TOO_BIG:
(void) printf(gettext("unsupported minimum blocksize"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
color_end();
} else if (children == 0 && !isspare &&
getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL &&
VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
vs->vs_configured_ashift < vs->vs_physical_ashift) {
(void) printf(
gettext(" block size: %dB configured, %dB native"),
1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift);
}
/* The root vdev has the scrub/resilver stats */
root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0) {
if (vs->vs_scan_processed != 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
} else if (vs->vs_resilver_deferred) {
(void) printf(gettext(" (awaiting resilver)"));
}
}
/* The top-level vdevs have the rebuild stats */
if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE &&
children == 0) {
if (vs->vs_rebuild_processed != 0) {
(void) printf(gettext(" (resilvering)"));
}
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
/* Display vdev initialization and trim status for leaves. */
if (children == 0) {
print_status_initialize(vs, cb->cb_print_vdev_init);
print_status_trim(vs, cb->cb_print_vdev_trim);
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
/* Only print normal classes here */
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
/* Provide vdev_rebuild_stats to children if available */
if (vrs == NULL) {
(void) nvlist_lookup_uint64_array(nv,
ZPOOL_CONFIG_REBUILD_STATS,
(uint64_t **)&vrs, &i);
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare, vrs);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
char *type, *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
case VDEV_AUX_CHILDREN_OFFLINE:
(void) printf(gettext("all children offline"));
break;
case VDEV_AUX_BAD_LABEL:
(void) printf(gettext("invalid label"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print specialized class vdevs.
*
 * These are recorded as top-level vdevs in the main pool child array
 * but with "is_log" set to 1 or an "alloc_bias" string. We use either
 * print_status_config() or print_import_config() to print the top-level
 * class vdevs; any of their children (e.g. mirrored slogs) are then printed
 * recursively, which works because only the top-level vdev is marked.
*/
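/*
 * As an illustration (hypothetical devices), a special-allocation class vdev
 * and its children end up printed like this in 'zpool status':
 *
 *	special
 *	  mirror-1   ONLINE       0     0     0
 *	    sdd      ONLINE       0     0     0
 *	    sde      ONLINE       0     0     0
 */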
static void
print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
const char *class)
{
uint_t c, children;
nvlist_t **child;
boolean_t printed = B_FALSE;
assert(zhp != NULL || !cb->cb_verbose);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
char *bias = NULL;
char *type = NULL;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class) != 0)
continue;
if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
(void) printf("\t%s\t\n", gettext(class));
printed = B_TRUE;
}
char *name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE, NULL);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
/*
* Display the status for the given pool.
*/
static int
show_import(nvlist_t *config, boolean_t report_error)
{
uint64_t pool_state;
vdev_stat_t *vs;
char *name;
uint64_t guid;
uint64_t hostid = 0;
char *msgid;
char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
char *comment;
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
/*
* If we're importing using a cachefile, then we won't report any
* errors unless we are in the scan phase of the import.
*/
if (reason != ZPOOL_STATUS_OK && !report_error)
return (reason);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices contains"
" corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(
gettext(" status: The pool data is corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"an incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported "
"features are not enabled on the pool.\n\t"
"(Note that they may be intentionally disabled "
"if the\n\t'compatibility' property is set.)\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Error reading or parsing "
"the file(s) indicated by the 'compatibility'\n"
"property.\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n"
"requested by the 'compatibility' property.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool uses the following "
"feature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
color_start(ANSI_YELLOW);
zpool_print_unsup_feat(config);
color_end();
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has the "
"multihost property on. It cannot\n\tbe safely imported "
"when the system hostid is not set.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool was last accessed "
"by another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record cannot "
"be read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices were "
"being resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric identifier, "
"though\n\tsome features will not be available "
"without an explicit 'zpool upgrade'.\n"));
} else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric\n\tidentifier, "
"though the file(s) indicated by its "
"'compatibility'\n\tproperty cannot be parsed at "
"this time.\n"));
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier and\n\tthe '-f' flag.\n"));
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext(" action: The pool can "
"be imported using its name or numeric "
"identifier,\n\thowever there is a compat"
"ibility issue which should be corrected"
"\n\tby running 'zpool scrub'\n"));
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext(" action: The pool can"
"not be imported with this version of ZFS "
"due to\n\tan active asynchronous destroy. "
"Revert to an earlier version\n\tand "
"allow the destroy to complete before "
"updating.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted datasets contain an on-disk "
"incompatibility, which\n\tneeds to be "
"corrected. Backup these datasets to new "
"encrypted datasets\n\tand destroy the "
"old ones.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted snapshots and bookmarks contain "
"an on-disk\n\tincompatibility. This may "
"cause on-disk corruption if they are used"
"\n\twith 'zfs recv'. To correct the "
"issue, enable the bookmark_v2 feature.\n\t"
"No additional action is needed if there "
"are no encrypted snapshots or\n\t"
"bookmarks. If preserving the encrypted "
"snapshots and bookmarks is\n\trequired, "
"use a non-raw send to backup and restore "
"them. Alternately,\n\tthey may be removed"
" to resolve the incompatibility.\n"));
break;
default:
/*
* All errata must contain an action message.
*/
assert(0);
}
} else {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext(" action: The pool can be imported "
"despite missing or damaged devices. The\n\tfault "
"tolerance of the pool may be compromised if imported.\n"));
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" action: The pool cannot be "
"imported. Access the pool on a system running "
"newer\n\tsoftware, or recreate the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported. Access the pool on a system that "
"supports\n\tthe required feature(s), or recreate "
"the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be "
"imported in read-write mode. Import the pool "
"with\n"
"\t\"-o readonly=on\", access the pool on a system "
"that supports the\n\trequired feature(s), or "
"recreate the pool from backup.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" action: The pool cannot be "
"imported. Attach the missing\n\tdevices and try "
"again.\n"));
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext(" action: The pool must be "
"exported from %s (hostid=%lx)\n\tbefore it "
"can be safely imported.\n"), hostname,
(unsigned long) hostid);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" action: Set a unique system "
"hostid with the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext(" action: The pool cannot be "
"imported due to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
(void) printf(gettext("comment: %s\n"), comment);
/*
* If the state is "closed" or "can't open", and the aux state
* is "corrupt data":
*/
if (((vs->vs_state == VDEV_STATE_CLOSED) ||
(vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
(vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\tThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"));
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\tThe pool may be active on "
"another system, but can be imported using\n\t"
"the '-f' flag.\n"));
}
if (msgid != NULL) {
(void) printf(gettext(
" see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to "
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
return (0);
}
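/*
 * Sketch of the listing show_import() produces for a healthy exported pool
 * (pool name, id, and devices are hypothetical):
 *
 *    pool: tank
 *      id: 1234567890123456789
 *   state: ONLINE
 *  action: The pool can be imported using its name or numeric identifier.
 *  config:
 *
 *	tank        ONLINE
 *	  mirror-0  ONLINE
 *	    sda     ONLINE
 *	    sdb     ONLINE
 */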
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags)
{
int ret = 0;
zpool_handle_t *zhp;
char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%lx)\nExport the pool on the other system, "
"then run 'zpool import'.\n"),
name, hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
char *hostname = "<unknown>";
uint64_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%lx) at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
(unsigned long)hostid, ctime((time_t *)&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = (char *)newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS) {
ret = zfs_crypto_attempt_load_keys(g_zfs, name);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
!(flags & ZFS_IMPORT_ONLY) &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (ret);
}
static int
import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags,
char *orig_name, char *new_name,
boolean_t do_destroyed, boolean_t pool_specified, boolean_t do_all,
importargs_t *import)
{
nvlist_t *config = NULL;
nvlist_t *found_config = NULL;
uint64_t pool_state;
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
int err = 0;
nvpair_t *elem = NULL;
boolean_t first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
import->policy) == 0);
if (!pool_specified) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) printf("\n");
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
/*
* If we're importing from cachefile, then
* we don't want to report errors until we
* are in the scan phase of the import. If
* we get an error, then we return that error
* to invoke the scan phase.
*/
if (import->cachefile && !import->scan)
err = show_import(config, B_FALSE);
else
(void) show_import(config, B_TRUE);
}
} else if (import->poolname != NULL) {
char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, import->poolname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"),
import->poolname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == import->guid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (pool_specified && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), orig_name);
err = B_TRUE;
} else {
err |= do_import(found_config, new_name,
mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (!pool_specified && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
return (err);
}
typedef struct target_exists_args {
const char *poolname;
uint64_t poolguid;
} target_exists_args_t;
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
target_exists_args_t *args = data;
nvlist_t *config = zpool_get_config(zhp, NULL);
int found = 0;
if (config == NULL)
return (0);
if (args->poolname != NULL) {
char *pool_name;
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&pool_name) == 0);
if (strcmp(pool_name, args->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) == 0);
if (pool_guid == args->poolguid)
found = 1;
}
zpool_close(zhp);
return (found);
}
/*
* zpool checkpoint <pool>
* checkpoint --discard <pool>
*
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
 * Checkpoints the specified pool by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
*/
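/*
 * Illustrative invocations (hypothetical pool name), matching the options
 * described above:
 *
 *   $ zpool checkpoint tank        - take a checkpoint of 'tank'
 *   $ zpool checkpoint -d tank     - discard the existing checkpoint
 *   $ zpool checkpoint -d -w tank  - discard and wait for the discard to finish
 */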
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open(g_zfs, pool)) == NULL) {
/* As a special case, check for use of '/' in the name */
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("'zpool checkpoint' "
"doesn't work on datasets. To save the state "
"of a dataset from a specific point in time "
"please use 'zfs snapshot'\n"));
return (1);
}
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
return (err);
}
#define CHECKPOINT_OPT 1024
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id>
* [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices. If importing from a cachefile config fails, then
 * fall back to searching for devices only in the directories that
* exist in the cachefile.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
 * -D Scan for previously destroyed pools, or import all or only the
 * specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
 * vdevs in the FAULTED state. In other words, it does a verbatim
 * import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
 * -T Specify a starting txg to use for import. This is an
 * intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
 * -s Scan using the default search path; the libblkid cache will
 * not be consulted.
*
* --rewind-to-checkpoint
* Import the pool and revert back to the checkpoint.
*
 * The import command scans for pools to import and imports pools based on
 * pool name or GUID. The pool can also be renamed as part of the import
 * process.
*/
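/*
 * Illustrative invocations (hypothetical pool names, IDs, and paths), using
 * only the options described above:
 *
 *   $ zpool import                        - list pools available for import
 *   $ zpool import tank                   - import the pool named 'tank'
 *   $ zpool import 1234567890123456789 newtank
 *                                         - import by numeric ID under a new name
 *   $ zpool import -d /dev/disk/by-id -a  - search one directory, import all
 *   $ zpool import -D -f tank             - force-import a destroyed pool
 *   $ zpool import -o readonly=on tank    - import with a temporary property
 */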
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
boolean_t pool_exists = B_FALSE;
boolean_t pool_specified = B_FALSE;
uint64_t txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
struct option long_options[] = {
{"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX",
long_options, NULL)) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
if (searchdirs == NULL) {
searchdirs = safe_malloc(sizeof (char *));
} else {
char **tmp = safe_malloc((nsearch + 1) *
sizeof (char *));
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
free(searchdirs);
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case CHECKPOINT_OPT:
flags |= ZFS_IMPORT_CHECKPOINT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if (cachefile && do_scan) {
(void) fprintf(stderr, gettext("-c is incompatible with -s\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
if (searchdirs != NULL)
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
 * -D The above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
pool_specified = B_TRUE;
/*
* User specified a name or guid. Ensure it's unique.
*/
target_exists_args_t search = {searchname, searchguid};
pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search);
}
/*
* Check the environment for the preferred search path.
*/
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir;
envdup = strdup(env);
dir = strtok(envdup, ":");
while (dir != NULL) {
if (searchdirs == NULL) {
searchdirs = safe_malloc(sizeof (char *));
} else {
char **tmp = safe_malloc((nsearch + 1) *
sizeof (char *));
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
free(searchdirs);
searchdirs = tmp;
}
searchdirs[nsearch++] = dir;
dir = strtok(NULL, ":");
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
idata.policy = policy;
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
if (pools != NULL && pool_exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && pool_exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
if (searchdirs != NULL)
free(searchdirs);
if (envdup != NULL)
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
/*
* If we're using the cachefile and we failed to import, then
 * fall back to scanning the directories for pools that match
 * those in the cachefile.
*/
if (err != 0 && cachefile != NULL) {
(void) printf(gettext("cachefile import failed, retrying\n"));
/*
* We use the scan flag to gather the directories that exist
* in the cachefile. If we need to fallback to searching for
* the pool config, we will only search devices in these
* directories.
*/
idata.scan = B_TRUE;
nvlist_free(pools);
pools = zpool_search_import(g_zfs, &idata, &libzfs_config_ops);
err = import_pools(pools, props, mntopts, flags,
argc >= 1 ? argv[0] : NULL,
argc >= 2 ? argv[1] : NULL,
do_destroyed, pool_specified, do_all, &idata);
}
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
if (searchdirs != NULL)
free(searchdirs);
if (envdup != NULL)
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
* -f (undocumented) force uberblock (and config including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
*
*/
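/*
 * Illustrative invocations (hypothetical pool name):
 *
 *   $ zpool sync           - initiate a TXG sync on every imported pool
 *   $ zpool sync tank      - sync only the pool 'tank'
 */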
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, zpool_sync_one,
&force);
return (ret);
}
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_name_flags;
int cb_namewidth;
int cb_iteration;
char **cb_vdev_names; /* Only show these vdevs */
unsigned int cb_vdev_names_count;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 13 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {NULL}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{"trimq_write", 2}, {NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2},
{"trim", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"},
{"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, {NULL}},
};
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
* Return the number of labels in a null-terminated name_and_columns_t
* array.
*
*/
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
 * const char *foo[] = {"bar", "baz", NULL};
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
[IOS_L_HISTO] = 10, /* 1B ns = 10sec */
[IOS_RQ_HISTO] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
/*
* Print the column labels, i.e:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
static void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width) / columns -
slen / columns);
if (text_start < 0)
text_start = 0;
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
if (spaces_to_end < 0)
spaces_to_end = 0;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
* If the user specified the "zpool status|iostat -c" then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
static void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdev_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdev_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
printf(" %*s", column_size, buf);
}
/*
* Calculate the default vdev stats
*
 * Subtract oldvs from newvs and save the resulting stats into calcvs (any
 * scaling factor is applied later, when the values are printed).
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
 * to process. In order to make a single uint64_t look like an array, we set
 * __data to the stat value and then set data = &__data with count = 1. Then
 * we can just use data and count.
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
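/*
 * For example (hypothetical value), a single counter is wrapped so callers
 * can treat it exactly like the array case:
 *
 *	struct stat_array sa;
 *	sa.__data = 42;
 *	sa.data = &sa.__data;
 *	sa.count = 1;
 *	assert(sa.data[0] == 42);
 */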
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
 * Helper function to look up a uint64_t array or uint64_t value and store its
* data as a stat_array. If the nvpair is a single uint64_t value, then we make
* it look like a one element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
 * Free the returned array with free_calc_stats() when you are done with it.
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
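/*
 * Illustrative sketch (hypothetical caller): the allocate/consume/free
 * pattern that the print_iostat_*() functions below follow when using
 * calc_and_alloc_stats_ex() and free_calc_stats().
 */
#if 0
static void
example_consume_ex_stats(nvlist_t *oldnv, nvlist_t *newnv)
{
	const char *names[] = { ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO };
	struct stat_array *nva;
	uint_t i;

	nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
	for (i = 0; i < nva[0].count; i++) {
		/* ... consume nva[0].data[i] ... */
	}
	free_calc_stats(nva, ARRAY_SIZE(names));
}
#endif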
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
/* Start at 512 - req size should never be lower than this */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
*/
if (histo[i] != 0) {
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
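/*
 * Worked example (illustrative numbers): 4 I/Os in the 8ns-15ns bucket
 * (i = 3, midpoint 12ns) and 2 I/Os in the 16ns-31ns bucket (i = 4,
 * midpoint 24ns) give:
 *
 *	total   = 4 * 12 + 2 * 24 = 96
 *	count   = 4 + 2           = 6
 *	average = 96 / 6          = 16ns
 */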
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (i = 0; i < ARRAY_SIZE(names); i++) {
val = nva[i].data[0];
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count);
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
static const char *class_name[] = {
VDEV_ALLOC_BIAS_DEDUP,
VDEV_ALLOC_BIAS_SPECIAL,
VDEV_ALLOC_CLASS_LOGS
};
/*
* Print out all the statistics for the given vdev. This can either be the
* toplevel configuration, or called recursively. If 'name' is NULL, then this
* is a verbose output, and we don't want to display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
static unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return (ret);
calcvs = safe_malloc(sizeof (*calcvs));
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdev_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdev_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdev_names_count && (i == cb->cb_vdev_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
 * Print the vdev name unless it's a histogram. Histograms
* display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, oldnv, newnv);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
/*
* print normal top-level devices
*/
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
- cb->cb_name_flags);
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* print all other top-level devices
*/
for (uint_t n = 0; n < 3; n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
char *bias = NULL;
char *type = NULL;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(newchild[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) &&
!cb->cb_scripted && !cb->cb_vdev_names) {
print_iostat_dashes(cb, 0,
class_name[n]);
}
printf("\n");
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
- cb->cb_name_flags);
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdev_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
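/*
 * Worked example (illustrative): with a 5 second interval, tdelta is
 * roughly 5 * NANOSEC, so scale = NANOSEC / tdelta = 0.2 and the raw
 * per-interval op/byte counts are multiplied by 0.2 to yield per-second
 * rates.
 */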
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
static int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
/*
* Return the required length of the pool/vdev name column. The minimum
* allowed width and output formatting flags must be provided.
*/
static int
get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose)
{
nvlist_t *config, *nvroot;
int width = min_width;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
unsigned int poolname_len = strlen(zpool_get_name(zhp));
if (verbose == B_FALSE) {
width = MAX(poolname_len, min_width);
} else {
width = MAX(poolname_len,
max_width(zhp, nvroot, 0, min_width, flags));
}
}
return (width);
}
/*
* Parse the input string, get the 'interval' and 'count' value if there is one.
*/
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && zfs_isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext(
"interval cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
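/*
 * Worked example (illustrative): for "zpool iostat tank 5 10" the trailing
 * "10" is parsed first as the interval; when "5" also turns out to be a
 * number, 10 is shifted into the count and 5 becomes the interval, leaving
 * argc = 1 ("tank"), interval = 5 and count = 10.
 */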
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
 * Return stat flags that are supported on all pools by both the module and
* zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
*/
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
	/* Default stats are always supported; set the flag anyway for completeness */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
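/*
 * Illustrative example: if one pool supports IOS_DEFAULT_M | IOS_LATENCY_M |
 * IOS_QUEUES_M but another (running an older module) only reports
 * IOS_DEFAULT_M | IOS_LATENCY_M, the mask starts as all 1s and is ANDed
 * down to IOS_DEFAULT_M | IOS_LATENCY_M.
 */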
/*
* Return 1 if cb_data->cb_vdev_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data)
{
iostat_cbdata_t *cb = cb_data;
char *name = NULL;
int ret = 0;
zpool_handle_t *zhp = zhp_data;
name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
if (strcmp(name, cb->cb_vdev_names[0]) == 0)
ret = 1; /* match */
free(name);
return (ret);
}
/*
* Returns 1 if cb_data->cb_vdev_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
iostat_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_vdev_names for a second... */
tmp_name = cb->cb_vdev_names;
	/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_vdev_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
B_FALSE, is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_vdev_names = tmp_name;
return (ret);
}
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, B_FALSE, is_pool_cb,
name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
iostat_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, cb)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL, cb)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
/*
* Floating point sleep(). Allows you to pass in a floating point value for
* seconds.
*/
static void
fsleep(float sec)
{
struct timespec req;
req.tv_sec = floor(sec);
req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
nanosleep(&req, NULL);
}
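/*
 * Worked example (illustrative): fsleep(2.5) sets req.tv_sec = 2 and
 * req.tv_nsec = 0.5 * NANOSEC = 500000000, i.e. a 2.5 second sleep.
 */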
/*
* Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or
* if we were unable to determine its size.
*/
static int
terminal_height(void)
{
struct winsize win;
if (isatty(STDOUT_FILENO) == 0)
return (-1);
if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0)
return (win.ws_row);
return (-1);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, "-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
 * Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* print all the files and directories within directory */
while ((ent = readdir(dir)) != NULL) {
sprintf(fullpath, "%s/%s", dirpath, ent->d_name);
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(char *subcommand)
{
char *dir, *sp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
dir = strtok(sp, ":");
while (dir != NULL) {
print_zpool_dir_scripts(dir);
dir = strtok(NULL, ":");
}
free(sp);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 10,
 * but may be as large as the terminal width - 42 so it still fits on one line.
* NOTE: 42 is the width of the default capacity/operations/bandwidth output
*/
static int
get_namewidth_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
int width, available_width;
/*
* get_namewidth() returns the maximum width of any name in that column
* for any pool/vdev/device line that will be output.
*/
width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags,
cb->cb_verbose);
/*
* The width we are calculating is the width of the header and also the
* padding width for names that are less than maximum width. The stats
* take up 42 characters, so the width available for names is:
*/
available_width = get_columns() - 42;
/*
* If the maximum width fits on a screen, then great! Make everything
* line up by justifying all lines to the same width. If that max
* width is larger than what's available, the name plus stats won't fit
* on one line, and justifying to that width would cause every line to
* wrap on the screen. We only want lines with long names to wrap.
* Limit the padding to what won't wrap.
*/
if (width > available_width)
width = available_width;
/*
* And regardless of whatever the screen width is (get_columns can
* return 0 if the width is not known or less than 42 for a narrow
* terminal) have the width be a minimum of 10.
*/
if (width < 10)
width = 10;
/* Save the calculated width */
cb->cb_namewidth = width;
return (0);
}
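/*
 * Worked example (illustrative): on an 80-column terminal the stats leave
 * 80 - 42 = 38 columns for names. A 50-character maximum vdev name is
 * clamped down to 38, while a 6-character maximum is padded up to the
 * 10-column floor.
 */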
/*
 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
* -n Only print headers once
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
boolean_t headers_once = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
	/* Used for printing an error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'n':
headers_once = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb)) {
/* All the args are vdevs */
cb.cb_vdev_names = argv;
cb.cb_vdev_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], &cb)) {
/* ...and the rest are vdev names */
cb.cb_vdev_names = argv + 1;
cb.cb_vdev_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdev_names_count != 0) {
/*
 * If the user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, parsable, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
 * Histogram tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
 * width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE,
get_namewidth_iostat, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdev_names,
cb.cb_vdev_names_count, cb.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* Check terminal size so we can print headers
* even when terminal window has its height
* changed.
*/
winheight = terminal_height();
/*
* Are we connected to TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
/*
* If it's the first time and we're not skipping it,
* or either skip or verbose mode, print the header.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose) ||
(!headers_once &&
(cb.cb_iteration % winheight) == 0)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdev_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
/*
* Flush the output so that redirection to a file isn't buffered
* indefinitely.
*/
(void) fflush(stdout);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) printf(" ");
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) printf("\n");
}
/*
* Given a pool and a list of properties, print out all the properties according
* to the described layout. Used by zpool_do_list().
*/
static void
print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
char *propstr;
boolean_t right_justify;
size_t width;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first) {
if (cb->cb_scripted)
(void) printf("\t");
else
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), NULL, cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
} else {
propstr = "-";
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) printf("%s", propstr);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
(void) printf("\n");
}
static void
print_one_column(zpool_prop_t prop, uint64_t value, const char *str,
boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
case ZPOOL_PROP_DEDUPRATIO:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
/* capacity value is in parts-per-10,000 (aka permyriad) */
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value / 100);
else
(void) snprintf(propval, sizeof (propval),
value < 1000 ? "%1.2f%%" : value < 10000 ?
"%2.1f%%" : "%3.0f%%", value / 100.0);
break;
case ZPOOL_PROP_HEALTH:
width = 8;
(void) strlcpy(propval, str, sizeof (propval));
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
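/*
 * Worked example (illustrative): a ZPOOL_PROP_CAPACITY value of 4523
 * permyriad prints as "45.2%" (45.23 through "%2.1f%%"), or as "45"
 * when ZFS_NICENUM_RAW is requested (4523 / 100).
 */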
/*
* print static default line per vdev
* not compatible with '-o' <proplist> option
*/
static void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth, boolean_t isspare)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
char *dashes = "%-*s - - - - "
"- - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
const char *state;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
return;
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) - depth), "");
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to the print_one_column()
* to indicate that the value is valid.
*/
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, scripted,
toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_CHECKPOINT,
vs->vs_checkpoint_space, NULL, scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL,
scripted, B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, NULL, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format);
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 10000 / vs->vs_space);
print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL,
scripted, toplevel, format);
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted,
B_TRUE, format);
(void) printf("\n");
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
/* list the normal vdevs first */
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog)
continue;
if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
- cb->cb_name_flags);
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE);
free(vname);
}
/* list the classes: 'logs', 'dedup', and 'special' */
for (uint_t n = 0; n < 3; n++) {
boolean_t printed = B_FALSE;
for (c = 0; c < children; c++) {
char *bias = NULL;
char *type = NULL;
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) == 0 && islog) {
bias = VDEV_ALLOC_CLASS_LOGS;
} else {
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias);
(void) nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type);
}
if (bias == NULL || strcmp(bias, class_name[n]) != 0)
continue;
if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0)
continue;
if (!printed) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth,
class_name[n]);
printed = B_TRUE;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
- cb->cb_name_flags);
+ cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_FALSE);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2,
B_TRUE);
free(vname);
}
}
}
/*
* Generic callback function to list a pool.
*/
static int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
print_pool(zhp, cbp);
if (cbp->cb_verbose) {
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE);
}
return (0);
}
/*
* Set the minimum pool/vdev name column width. The width must be at least 9,
* but may be as large as needed.
*/
static int
get_namewidth_list(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cb = data;
int width;
width = get_namewidth(zhp, cb->cb_namewidth, cb->cb_name_flags,
cb->cb_verbose);
if (width < 9)
width = 9;
cb->cb_namewidth = width;
return (0);
}
/*
* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,checkpoint,expandsize,fragmentation,"
"capacity,dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
cb.cb_namewidth = 8; /* 8 until precalc is avail */
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
cb.cb_literal, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth_list, &cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (!cb.cb_scripted && (first || cb.cb_verbose)) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t rebuild = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:sw")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 's':
rebuild = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing,
rebuild);
if (ret == 0 && wait)
ret = zpool_wait(zhp,
replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
 * -w	Wait for the replacement to complete before returning
*
* Replace <device> with <new_device>.
*/
/* ARGSUSED */
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-fsw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -s Use sequential instead of healing reconstruction for resilver.
* -o Set property=value.
* -w Wait for resilvering to complete before returning
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
* <device> and <new_device>. In either case, <new_device> will begin life
* with a DTL of [0, now], and will immediately begin to resilver itself.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
/* ARGSUSED */
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
 * Restrictions: the top level of the pool must only be made up of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, "",
flags.name_flags);
print_vdev_tree(NULL, "dedup", config, 0,
VDEV_ALLOC_BIAS_DEDUP, 0);
print_vdev_tree(NULL, "special", config, 0,
VDEV_ALLOC_BIAS_SPECIAL, 0);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
ret = 1;
(void) fprintf(stderr, gettext("Split was successful, but "
"the datasets could not all be mounted\n"));
(void) fprintf(stderr, gettext("Try doing '%s' with a "
"different altroot\n"), "zpool import");
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* zpool online <pool> <device> ...
*/
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "e")) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
}
} else {
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool offline [-ft] <pool> <device> ...
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
*/
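/*
* Illustrative usage (examples only; 'tank' and 'sda' are placeholders):
*   zpool offline tank sda      take the device offline persistently
*   zpool offline -t tank sda   take it offline only until the next reboot
*   zpool offline -f tank sda   force the device into a faulted state
*/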
/* ARGSUSED */
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "ft")) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (fault) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool clear <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
*/
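/*
* Illustrative usage (examples only; 'tank' and 'sda' are placeholders):
*   zpool clear tank            clear error counts for the whole pool
*   zpool clear tank sda        clear errors for a single device
*   zpool clear -F tank         attempt rewind recovery (-n for a dry run,
*                               -X for extreme rewind)
*/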
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
/* check options */
while ((c = getopt(argc, argv, "FnX")) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY,
rewind_policy) != 0) {
return (1);
}
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
/*
* zpool reguid <pool>
*/
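/*
* Illustrative usage (example only; 'tank' is a placeholder):
*   zpool reguid tank           generate a new unique identifier for the pool
*/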
int
zpool_do_reguid(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_reguid(zhp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen <pool>
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
*/
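/*
* Illustrative usage (examples only; 'tank' is a placeholder; with no pool
* argument every pool is reopened):
*   zpool reopen tank           reopen all vdevs of the pool
*   zpool reopen -n tank        reopen without restarting an in-progress scrub
*/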
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE, zpool_reopen_one,
&scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
static boolean_t
zpool_has_checkpoint(zpool_handle_t *zhp)
{
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
if (config != NULL) {
pool_checkpoint_stat_t *pcs = NULL;
uint_t c;
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return (B_FALSE);
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
return (B_TRUE);
}
return (B_FALSE);
}
static int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scan '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
if (err == 0 && zpool_has_checkpoint(zhp) &&
cb->cb_type == POOL_SCAN_SCRUB) {
(void) printf(gettext("warning: will not scrub state that "
"belongs to the checkpoint of pool '%s'\n"),
zpool_get_name(zhp));
}
return (err != 0);
}
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
zpool_wait_activity_t *act = data;
return (zpool_wait(zhp, *act));
}
/*
* zpool scrub [-s | -p] [-w] <pool> ...
*
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pauses an in-progress scrub.
* -w Wait. Blocks until scrub has completed.
*/
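/*
* Illustrative usage (examples only; 'tank' is a placeholder):
*   zpool scrub tank            start a scrub
*   zpool scrub -p tank         pause an in-progress scrub
*   zpool scrub -s tank         stop an in-progress scrub
*   zpool scrub -w tank         start a scrub and wait for it to complete
*/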
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "spw")) != -1) {
switch (c) {
case 's':
cb.cb_type = POOL_SCAN_NONE;
break;
case 'p':
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (cb.cb_type == POOL_SCAN_NONE &&
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-s and -p are mutually exclusive\n"));
usage(B_FALSE);
}
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
wait_callback, &act);
}
return (error);
}
/*
* zpool resilver <pool> ...
*
* Restarts any in-progress resilver.
*/
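/*
* Illustrative usage (example only; 'tank' is a placeholder):
*   zpool resilver tank         restart any in-progress resilver on the pool
*/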
int
zpool_do_resilver(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, B_FALSE,
scrub_callback, &cb));
}
/*
* zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
*
* -c Cancel. Ends any in-progress trim.
* -d Secure trim. Requires kernel and device support.
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
* adding a multiplier suffix such as 'k' or 'm'.
* -s Suspend. TRIM can then be restarted with no flags.
* -w Wait. Blocks until trimming has completed.
*/
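/*
* Illustrative usage (examples only; 'tank' and 'sda' are placeholders):
*   zpool trim tank               trim all leaf vdevs in the pool
*   zpool trim -r 500m tank sda   trim one device at a capped rate
*                                 (suffixes such as 'k' or 'm' accepted)
*   zpool trim -s tank            suspend trimming (-c cancels it instead)
*   zpool trim -w tank            start a trim and wait for it to complete
*/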
int
zpool_do_trim(int argc, char **argv)
{
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"secure", no_argument, NULL, 'd'},
{"rate", required_argument, NULL, 'r'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_trim_func_t cmd_type = POOL_TRIM_START;
uint64_t rate = 0;
boolean_t secure = B_FALSE;
boolean_t wait = B_FALSE;
int c;
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
!= -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_CANCEL) {
(void) fprintf(stderr, gettext("-c cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_CANCEL;
break;
case 'd':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-d cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
secure = B_TRUE;
break;
case 'r':
if (cmd_type != POOL_TRIM_START) {
(void) fprintf(stderr, gettext("-r cannot be "
"combined with the -c or -s options\n"));
usage(B_FALSE);
}
if (zfs_nicestrtonum(NULL, optarg, &rate) == -1) {
(void) fprintf(stderr,
gettext("invalid value for rate\n"));
usage(B_FALSE);
}
break;
case 's':
if (cmd_type != POOL_TRIM_START &&
cmd_type != POOL_TRIM_SUSPEND) {
(void) fprintf(stderr, gettext("-s cannot be "
"combined with other options\n"));
usage(B_FALSE);
}
cmd_type = POOL_TRIM_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
return (-1);
}
if (wait && (cmd_type != POOL_TRIM_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
char *poolname = argv[0];
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
return (-1);
trimflags_t trim_flags = {
.secure = secure,
.rate = rate,
.wait = wait,
};
nvlist_t *vdevs = fnvlist_alloc();
if (argc == 1) {
/* no individual leaf vdevs specified, so add them all */
nvlist_t *config = zpool_get_config(zhp, NULL);
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
zpool_collect_leaves(zhp, nvroot, vdevs);
trim_flags.fullpool = B_TRUE;
} else {
trim_flags.fullpool = B_FALSE;
for (int i = 1; i < argc; i++) {
fnvlist_add_boolean(vdevs, argv[i]);
}
}
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
fnvlist_free(vdevs);
zpool_close(zhp);
return (error);
}
/*
* Converts a total number of seconds to a human readable string broken
* down into days/hours/minutes/seconds.
*/
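/*
* For example, given the format strings used below: 93784 seconds renders as
* "1 days 02:03:04", while 7384 seconds renders as "02:03:04".
*/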
static void
secs_to_dhms(uint64_t total, char *buf)
{
uint64_t days = total / 60 / 60 / 24;
uint64_t hours = (total / 60 / 60) % 24;
uint64_t mins = (total / 60) % 60;
uint64_t secs = (total % 60);
if (days > 0) {
(void) sprintf(buf, "%llu days %02llu:%02llu:%02llu",
(u_longlong_t)days, (u_longlong_t)hours,
(u_longlong_t)mins, (u_longlong_t)secs);
} else {
(void) sprintf(buf, "%02llu:%02llu:%02llu",
(u_longlong_t)hours, (u_longlong_t)mins,
(u_longlong_t)secs);
}
}
/*
* Print out detailed scrub status.
*/
static void
print_scan_scrub_resilver_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t pass_scanned, scanned, pass_issued, issued, total;
uint64_t elapsed, scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
char srate_buf[7], irate_buf[7], time_buf[32];
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
assert(ps->pss_func == POOL_SCAN_SCRUB ||
ps->pss_func == POOL_SCAN_RESILVER);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
secs_to_dhms(end - start, time_buf);
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub repaired %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilvered %s "
"in %s with %llu errors on %s"), processed_buf,
time_buf, (u_longlong_t)ps->pss_errors,
ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (ps->pss_func == POOL_SCAN_SCRUB) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total = ps->pss_to_examine;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
uint64_t total_secs_left = (issue_rate != 0 && total >= issued) ?
((total - issued) / issue_rate) : UINT64_MAX;
secs_to_dhms(total_secs_left, time_buf);
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total, total_buf, sizeof (total_buf));
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
/* do not print estimated time if we have a paused scrub */
if (pause == 0) {
(void) printf(gettext("\t%s scanned at %s/s, "
"%s issued at %s/s, %s total\n"),
scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
} else {
(void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
scanned_buf, issued_buf, total_buf);
}
if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
if (total_secs_left != UINT64_MAX &&
issue_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
static void
print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, char *vdev_name)
{
if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE)
return;
printf(" ");
printf_color(ANSI_BOLD, gettext("scan:"));
printf(" ");
uint64_t bytes_scanned = vrs->vrs_bytes_scanned;
uint64_t bytes_issued = vrs->vrs_bytes_issued;
uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt;
uint64_t bytes_est = vrs->vrs_bytes_est;
uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned /
(vrs->vrs_pass_time_ms + 1)) * 1000;
uint64_t issue_rate = (vrs->vrs_pass_bytes_issued /
(vrs->vrs_pass_time_ms + 1)) * 1000;
double scan_pct = MIN((double)bytes_scanned * 100 /
(bytes_est + 1), 100);
/* Format all of the numbers we will be reporting */
char bytes_scanned_buf[7], bytes_issued_buf[7];
char bytes_rebuilt_buf[7], bytes_est_buf[7];
char scan_rate_buf[7], issue_rate_buf[7], time_buf[32];
zfs_nicebytes(bytes_scanned, bytes_scanned_buf,
sizeof (bytes_scanned_buf));
zfs_nicebytes(bytes_issued, bytes_issued_buf,
sizeof (bytes_issued_buf));
zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf,
sizeof (bytes_rebuilt_buf));
zfs_nicebytes(bytes_est, bytes_est_buf, sizeof (bytes_est_buf));
zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf));
zfs_nicebytes(issue_rate, issue_rate_buf, sizeof (issue_rate_buf));
time_t start = vrs->vrs_start_time;
time_t end = vrs->vrs_end_time;
/* Rebuild is finished or canceled. */
if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) {
secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf);
(void) printf(gettext("resilvered (%s) %s in %s "
"with %llu errors on %s"), vdev_name, bytes_rebuilt_buf,
time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) {
(void) printf(gettext("resilver (%s) canceled on %s"),
vdev_name, ctime(&end));
return;
} else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
(void) printf(gettext("resilver (%s) in progress since %s"),
vdev_name, ctime(&start));
}
assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE);
secs_to_dhms(MAX((int64_t)bytes_est - (int64_t)bytes_scanned, 0) /
MAX(scan_rate, 1), time_buf);
(void) printf(gettext("\t%s scanned at %s/s, %s issued %s/s, "
"%s total\n"), bytes_scanned_buf, scan_rate_buf,
bytes_issued_buf, issue_rate_buf, bytes_est_buf);
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
bytes_rebuilt_buf, scan_pct);
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
if (scan_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %s to go\n"), time_buf);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
/*
* Print rebuild status for top-level vdevs.
*/
static void
print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
char *name = zpool_vdev_name(g_zfs, zhp,
child[c], VDEV_NAME_TYPE_ID);
print_rebuild_status_impl(vrs, name);
free(name);
}
}
}
/*
* As we don't scrub checkpointed blocks, we want to warn the user that we
* skipped scanning some blocks if a checkpoint exists or existed at any
* time during the scan. If a sequential (rather than healing) reconstruction
* was performed, the blocks were reconstructed; however, their checksums
* have not been verified, so we still print the warning.
*/
static void
print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs)
{
if (ps == NULL || pcs == NULL)
return;
if (pcs->pcs_state == CS_NONE ||
pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
return;
assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS);
if (ps->pss_state == DSS_NONE)
return;
if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
ps->pss_end_time < pcs->pcs_start_time)
return;
if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) {
(void) printf(gettext(" scan warning: skipped blocks "
"that are only referenced by the checkpoint.\n"));
} else {
assert(ps->pss_state == DSS_SCANNING);
(void) printf(gettext(" scan warning: skipping blocks "
"that are only referenced by the checkpoint.\n"));
}
}
/*
* Returns B_TRUE if there is an active rebuild in progress. Otherwise,
* B_FALSE is returned and 'rebuild_end_time' is set to the end time for
* the last completed (or cancelled) rebuild.
*/
static boolean_t
check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time)
{
nvlist_t **child;
uint_t children;
boolean_t rebuilding = B_FALSE;
uint64_t end_time = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
if (nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) {
if (vrs->vrs_end_time > end_time)
end_time = vrs->vrs_end_time;
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
rebuilding = B_TRUE;
end_time = 0;
break;
}
}
}
if (rebuild_end_time != NULL)
*rebuild_end_time = end_time;
return (rebuilding);
}
/*
* Print the scan status.
*/
static void
print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot)
{
uint64_t rebuild_end_time = 0, resilver_end_time = 0;
boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE;
boolean_t active_resilver = B_FALSE;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *ps = NULL;
uint_t c;
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c) == 0) {
if (ps->pss_func == POOL_SCAN_RESILVER) {
resilver_end_time = ps->pss_end_time;
active_resilver = (ps->pss_state == DSS_SCANNING);
}
have_resilver = (ps->pss_func == POOL_SCAN_RESILVER);
have_scrub = (ps->pss_func == POOL_SCAN_SCRUB);
}
boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time);
boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0));
/* Always print the scrub status when available. */
if (have_scrub)
print_scan_scrub_resilver_status(ps);
/*
* When there is an active resilver or rebuild print its status.
* Otherwise print the status of the last resilver or rebuild.
*/
if (active_resilver || (!active_rebuild && have_resilver &&
resilver_end_time && resilver_end_time > rebuild_end_time)) {
print_scan_scrub_resilver_status(ps);
} else if (active_rebuild || (!active_resilver && have_rebuild &&
rebuild_end_time && rebuild_end_time > resilver_end_time)) {
print_rebuild_status(zhp, nvroot);
}
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_scan_warning(ps, pcs);
}
/*
* Print out detailed removal status.
*/
static void
print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
{
char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
time_t start, end;
nvlist_t *config, *nvroot;
nvlist_t **child;
uint_t children;
char *vdev_name;
if (prs == NULL || prs->prs_state == DSS_NONE)
return;
/*
* Determine name of vdev.
*/
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0);
assert(prs->prs_removing_vdev < children);
vdev_name = zpool_vdev_name(g_zfs, zhp,
child[prs->prs_removing_vdev], B_TRUE);
printf_color(ANSI_BOLD, gettext("remove: "));
start = prs->prs_start_time;
end = prs->prs_end_time;
zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
/*
* Removal is finished or canceled.
*/
if (prs->prs_state == DSS_FINISHED) {
uint64_t minutes_taken = (end - start) / 60;
(void) printf(gettext("Removal of vdev %llu copied %s "
"in %lluh%um, completed on %s"),
(longlong_t)prs->prs_removing_vdev,
copied_buf,
(u_longlong_t)(minutes_taken / 60),
(uint_t)(minutes_taken % 60),
ctime((time_t *)&end));
} else if (prs->prs_state == DSS_CANCELED) {
(void) printf(gettext("Removal of %s canceled on %s"),
vdev_name, ctime(&end));
} else {
uint64_t copied, total, elapsed, mins_left, hours_left;
double fraction_done;
uint_t rate;
assert(prs->prs_state == DSS_SCANNING);
/*
* Removal is in progress.
*/
(void) printf(gettext(
"Evacuation of %s in progress since %s"),
vdev_name, ctime(&start));
copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
total = prs->prs_to_copy;
fraction_done = (double)copied / total;
/* elapsed time for this pass */
elapsed = time(NULL) - prs->prs_start_time;
elapsed = elapsed > 0 ? elapsed : 1;
rate = copied / elapsed;
rate = rate > 0 ? rate : 1;
mins_left = ((total - copied) / rate) / 60;
hours_left = mins_left / 60;
zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
zfs_nicenum(total, total_buf, sizeof (total_buf));
zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
/*
* do not print estimated time if hours_left is more than
* 30 days
*/
(void) printf(gettext(
"\t%s copied out of %s at %s/s, %.2f%% done"),
examined_buf, total_buf, rate_buf, 100 * fraction_done);
if (hours_left < (30 * 24)) {
(void) printf(gettext(", %lluh%um to go\n"),
(u_longlong_t)hours_left, (uint_t)(mins_left % 60));
} else {
(void) printf(gettext(
", (copy is slow, no estimated time)\n"));
}
}
free(vdev_name);
if (prs->prs_mapping_memory > 0) {
char mem_buf[7];
zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
(void) printf(gettext(
"\t%s memory used for removed device mappings\n"),
mem_buf);
}
}
static void
print_checkpoint_status(pool_checkpoint_stat_t *pcs)
{
time_t start;
char space_buf[7];
if (pcs == NULL || pcs->pcs_state == CS_NONE)
return;
(void) printf(gettext("checkpoint: "));
start = pcs->pcs_start_time;
zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf));
if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) {
char *date = ctime(&start);
/*
* ctime() adds a newline at the end of the generated
* string, thus the weird format specifier and the
* strlen() call used to chop it off from the output.
*/
(void) printf(gettext("created %.*s, consumes %s\n"),
(int)(strlen(date) - 1), date, space_buf);
return;
}
assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING);
(void) printf(gettext("discarding, %s remaining.\n"),
space_buf);
}
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL);
free(name);
}
}
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2,
B_FALSE, NULL);
free(name);
}
}
static void
print_dedup_stats(nvlist_t *config)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
char dspace[6], mspace[6];
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if we have anything in the dedup
* table, continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
/*
* Display a summary of pool status, such as:
*
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
* see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config. If the '-e'
* option is specified, then we print out error rate information as well.
*/
static int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED ||
reason == ZPOOL_STATUS_COMPATIBILITY_ERR ||
reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_get_state_str(zhp);
printf(" ");
printf_color(ANSI_BOLD, gettext("pool:"));
printf(" %s\n", zpool_get_name(zhp));
printf(" ");
printf_color(ANSI_BOLD, gettext("state: "));
printf_color(health_str_to_color(health), "%s", health);
printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. Sufficient replicas exist for\n\tthe pool "
"to continue functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be opened. There are insufficient\n\treplicas for the"
" pool to continue functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Attach the missing device "
"and online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices could "
"not be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_FAILING_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Determine if the "
"device needs to be replaced, and clear the errors\n\tusing"
" 'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using 'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Online the device "
"using zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
case ZPOOL_STATUS_REBUILDING:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_REBUILD_SCRUB:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices have "
"been sequentially resilvered, scrubbing\n\tthe pool "
"is recommended.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to "
"verify all data checksums.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Restore the file in question"
" if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool metadata is "
"corrupted and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_VERSION_OLDER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is formatted using "
"a legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Upgrade the pool using "
"'zpool upgrade'. Once this is done, the\n\tpool will no "
"longer be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool has been upgraded "
"to a newer, incompatible on-disk version.\n\tThe pool "
"cannot be accessed on this system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system running more recent software, or\n\trestore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Some supported and "
"requested features are not enabled on the pool.\n\t"
"The pool can still be used, but some features are "
"unavailable.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("This pool has a "
"compatibility list specified, but it could not be\n\t"
"read/parsed at this time. The pool can still be used, "
"but this\n\tshould be investigated.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Check the value of the "
"'compatibility' property against the\n\t"
"appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or "
ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_INCOMPATIBLE_FEAT:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more features "
"are enabled on the pool despite not being\n\t"
"requested by the 'compatibility' property.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Consider setting "
"'compatibility' to an appropriate value, or\n\t"
"adding needed features to the relevant file in\n\t"
ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"on this system because it uses the\n\tfollowing feature(s)"
" not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Access the pool from a "
"system that supports the required feature(s),\n\tor "
"restore the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool can only be "
"accessed in read-only mode on this system. It\n\tcannot be"
" accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed "
"in read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Destroy and re-create the "
"pool from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("The pool is suspended "
"because multihost writes failed or were delayed;\n\t"
"another system could import the pool undetected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices"
" are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("One or more devices are "
"faulted in response to IO failures.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Make sure the affected "
"devices are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_NON_NATIVE_ASHIFT:
(void) printf(gettext("status: One or more devices are "
"configured to use a non-native block size.\n"
"\tExpect reduced performance.\n"));
(void) printf(gettext("action: Replace affected devices with "
"devices that support the\n\tconfigured block size, or "
"migrate data to a properly configured\n\tpool.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid"
" and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("Export this pool on all "
"systems on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
printf_color(ANSI_BOLD, gettext("status: "));
printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted datasets "
"contain an on-disk incompatibility\n\twhich "
"needs to be corrected.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the issue"
" backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted snapshots "
"and bookmarks contain an on-disk\n\tincompat"
"ibility. This may cause on-disk corruption if "
"they are used\n\twith 'zfs recv'.\n"));
printf_color(ANSI_BOLD, gettext("action: "));
printf_color(ANSI_YELLOW, gettext("To correct the"
"issue, enable the bookmark_v2 feature. No "
"additional\n\taction is needed if there are no "
"encrypted snapshots or bookmarks.\n\tIf preserving"
"the encrypted snapshots and bookmarks is required,"
" use\n\ta non-raw send to backup and restore them."
" Alternately, they may be\n\tremoved to resolve "
"the incompatibility.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors can't actually be generated, yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (msgid != NULL) {
printf(" ");
printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
}
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
pool_checkpoint_stat_t *pcs = NULL;
pool_removal_stat_t *prs = NULL;
print_scan_status(zhp, nvroot);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
print_removal_status(zhp, prs);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
print_checkpoint_status(pcs);
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
color_start(ANSI_BOLD);
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
color_end();
if (cbp->cb_print_slow_ios) {
printf_color(ANSI_BOLD, " %5s", gettext("SLOW"));
}
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE, NULL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL);
print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
nvlist_t *nverrlist = NULL;
/*
* If the approximate error count is small, get a
* precise count by fetching the entire log and
* uniquifying the results.
*/
if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
zpool_get_errlog(zhp, &nverrlist) == 0) {
nvpair_t *elem;
elem = NULL;
nerr = 0;
while ((elem = nvlist_next_nvpair(nverrlist,
elem)) != NULL) {
nerr++;
}
}
nvlist_free(nverrlist);
(void) printf("\n");
if (nerr == 0)
(void) printf(gettext("errors: No known data "
"errors\n"));
else if (!cbp->cb_verbose)
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
else
print_error_log(zhp);
}
if (cbp->cb_dedup_stats)
print_dedup_stats(config);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
/*
* zpool status [-c [script1,script2,...]] [-igLpPstvx] [-T d|u] [pool] ...
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -i Display vdev initialization status.
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -s Display slow IOs column.
* -v Display complete error logs
* -x Display only pools with potential problems
* -D Display dedup status (undocumented)
* -t Display vdev TRIM status.
* -T Display a timestamp in date(1) or Unix format
*
* Describes the health status of all pools or some subset.
*/
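/*
* Illustrative usage (examples only; 'tank' is a placeholder):
*   zpool status                show the status of every pool
*   zpool status -x             show only pools with potential problems
*   zpool status -v tank        include the complete error log for 'tank'
*   zpool status -T d tank 5    repeat every 5 seconds with a date(1) timestamp
*/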
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
/* check options */
while ((c = getopt(argc, argv, "c:igLpPsvxDtT:")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
case 'i':
cb.cb_print_vdev_init = B_TRUE;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 's':
cb.cb_print_slow_ios = B_TRUE;
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case 'D':
cb.cb_dedup_stats = B_TRUE;
break;
case 't':
cb.cb_print_vdev_trim = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
for (;;) {
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
ret = for_each_pool(argc, argv, B_TRUE, NULL, cb.cb_literal,
status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
(void) printf(gettext("all pools are healthy\n"));
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems(zhp, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
(void) fprintf(stderr, gettext("Upgrade not performed because "
"'compatibility' property set to '"
ZPOOL_COMPAT_LEGACY "'.\n"));
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat,
ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t requested_features[SPA_FEATURES];
if (zpool_do_load_compat(compat, requested_features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t modified_pool = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
modified_pool = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
modified_pool = B_TRUE;
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"
"Note that setting a pool's 'compatibility' "
"feature to '" ZPOOL_COMPAT_LEGACY "' will\n"
"inhibit upgrades.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!spa_feature_table[i].fi_zfs_mod_supported)
continue;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "
"used to inhibit\nfeature "
"upgrades.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
/* ARGSUSED */
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t modified_pool = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
modified_pool = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
modified_pool = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported and requested features enabled.\n"),
zpool_get_name(zhp));
}
}
if (modified_pool) {
(void) printf("\n");
(void) after_zpool_upgrade(zhp);
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display downrev'd ZFS pools available for upgrade.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
*/
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf(gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
if (!fi->fi_zfs_mod_supported)
continue;
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported and "
"requested features enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported and requested features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE,
upgrade_one, &cb);
}
return (ret);
}
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
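/*
* Print the history records contained in the given nvlist, honoring the
* -l (long format) and -i (internal events) options.
*/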
static void
print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb)
{
nvlist_t **records;
uint_t numrecords;
int i;
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[64] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
}
if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) {
uint64_t elapsed_ns = fnvlist_lookup_int64(records[i],
ZPOOL_HIST_ELAPSED_NS);
(void) snprintf(tbuf + strlen(tbuf),
sizeof (tbuf) - strlen(tbuf),
" (%lldms)", (long long)elapsed_ns / 1000 / 1000);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) {
(void) printf(" output nvlist omitted; "
"original size: %lldKB\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_OUTPUT_SIZE) / 1024);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
}
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
int ret;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
uint64_t off = 0;
boolean_t eof = B_FALSE;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
while (!eof) {
if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0)
return (ret);
print_history_records(nvhis, cb);
nvlist_free(nvhis);
}
(void) printf("\n");
return (ret);
}
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, B_FALSE, get_history_one,
&cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
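/*
* Print the one-line summary for an event: timestamp (with nanosecond
* resolution) followed by the event class.
*/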
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32], *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) memcpy(str+7, ctime_str+20, 4); /* '1993' */
(void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
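/*
* Recursively print the members of an event nvlist, indented by 'depth'
* spaces. Nested nvlists and arrays are expanded in place.
*/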
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_DONTCARE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
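/*
* Read and print events, optionally filtered by pool name. With -f the
* call blocks and follows new events; otherwise it returns once the
* event queue has been drained.
*/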
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
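/*
* Clear all pending events and report how many were discarded.
*/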
static int
zpool_do_events_clear(ev_opts_t *opts)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays events generated by ZFS.
*/
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear(&opts);
else
ret = zpool_do_events_next(&opts);
return (ret);
}
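/*
* Callback for 'zpool get': print the requested properties (including
* feature@ and unsupported@ properties) for a single pool.
*/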
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[MAXNAMELEN];
zprop_source_t srctype;
zprop_list_t *pl;
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp, pl->pl_user_prop,
value, sizeof (value)) == 0) {
zprop_print_one_property(zpool_get_name(zhp),
cbp, pl->pl_user_prop, value, srctype,
NULL, NULL);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype, cbp->cb_literal) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp), cbp,
zpool_prop_to_name(pl->pl_prop), value, srctype,
NULL, NULL);
}
}
return (0);
}
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *value;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
/* check options */
while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'o':
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
{ "name", "property", "value", "source",
"all", NULL };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
switch (getsubopt(&optarg, col_subopts,
&value)) {
case 0:
cb.cb_columns[i++] = GET_COL_NAME;
break;
case 1:
cb.cb_columns[i++] = GET_COL_PROPERTY;
break;
case 2:
cb.cb_columns[i++] = GET_COL_VALUE;
break;
case 3:
cb.cb_columns[i++] = GET_COL_SOURCE;
break;
case 4:
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
i = ZFS_GET_NCOLS;
break;
default:
(void) fprintf(stderr,
gettext("invalid column name "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
if (zprop_get_list(g_zfs, argv[0], &cb.cb_proplist,
ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
argc--;
argv++;
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_literal,
get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
boolean_t cb_any_successful;
} set_cbdata_t;
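/*
* Callback for 'zpool set': validate feature and compatibility settings
* before applying the new property value to a single pool.
*/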
static int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
/* Check if we have out-of-bounds features */
if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) {
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(cb->cb_value, features) !=
ZPOOL_COMPATIBILITY_OK)
return (-1);
nvlist_t *enabled = zpool_get_features(zhp);
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
if (nvlist_exists(enabled, fguid) && !features[i])
break;
}
if (i < SPA_FEATURES)
(void) fprintf(stderr, gettext("Warning: one or "
"more features already enabled on pool '%s'\n"
"are not present in this compatibility set.\n"),
zpool_get_name(zhp));
}
/* if we're setting a feature, check that it's in the compatibility set */
if (zpool_prop_feature(cb->cb_propname) &&
strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) {
char *fname = strchr(cb->cb_propname, '@') + 1;
spa_feature_t f;
if (zfeature_lookup_name(fname, &f) == 0) {
char compat[ZFS_MAXPROPLEN];
if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY,
compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
compat[0] = '\0';
boolean_t features[SPA_FEATURES];
if (zpool_do_load_compat(compat, features) !=
ZPOOL_COMPATIBILITY_OK) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"because the pool's 'compatibility' "
"property cannot be parsed.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
if (!features[f]) {
(void) fprintf(stderr, gettext("Error: "
"cannot enable feature '%s' on pool '%s'\n"
"as it is not specified in this pool's "
"current compatibility set.\n"
"Consider setting 'compatibility' to a "
"less restrictive set, or to 'off'.\n"),
fname, zpool_get_name(zhp));
return (-1);
}
}
}
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
if (!error)
cb->cb_any_successful = B_TRUE;
return (error);
}
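/*
* zpool set property=value <pool>
*
* Sets the given property on the specified pool.
*/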
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL, B_FALSE,
set_callback, &cb);
return (error);
}
/* Add up the total number of bytes left to initialize/trim across all vdevs */
static uint64_t
vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity)
{
uint64_t bytes_remaining;
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
assert(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (activity == ZPOOL_WAIT_INITIALIZE &&
vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE)
bytes_remaining = vs->vs_initialize_bytes_est -
vs->vs_initialize_bytes_done;
else if (activity == ZPOOL_WAIT_TRIM &&
vs->vs_trim_state == VDEV_TRIM_ACTIVE)
bytes_remaining = vs->vs_trim_bytes_est -
vs->vs_trim_bytes_done;
else
bytes_remaining = 0;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++)
bytes_remaining += vdev_activity_remaining(child[c], activity);
return (bytes_remaining);
}
/* Add up the total number of bytes left to rebuild across top-level vdevs */
static uint64_t
vdev_activity_top_remaining(nvlist_t *nv)
{
uint64_t bytes_remaining = 0;
nvlist_t **child;
uint_t children;
int error;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (uint_t c = 0; c < children; c++) {
vdev_rebuild_stat_t *vrs;
uint_t i;
error = nvlist_lookup_uint64_array(child[c],
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i);
if (error == 0) {
if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) {
bytes_remaining += (vrs->vrs_bytes_est -
vrs->vrs_bytes_rebuilt);
}
}
}
return (bytes_remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
nvlist_t **child;
uint_t c, children;
char *vdev_type;
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type);
if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 ||
strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) {
return (B_TRUE);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
for (c = 0; c < children; c++) {
if (vdev_any_spare_replacing(child[c]))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct wait_data {
char *wd_poolname;
boolean_t wd_scripted;
boolean_t wd_exact;
boolean_t wd_headers_once;
boolean_t wd_should_exit;
/* Which activities to wait for */
boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
float wd_interval;
pthread_cond_t wd_cv;
pthread_mutex_t wd_mutex;
} wait_data_t;
/*
* Print to stdout a single line containing one column for each activity we
* are waiting for, specifying how many bytes of work remain for that
* activity.
*/
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
nvlist_t *config, *nvroot;
uint_t c;
int i;
pool_checkpoint_stat_t *pcs = NULL;
pool_scan_stat_t *pss = NULL;
pool_removal_stat_t *prs = NULL;
char *headers[] = {"DISCARD", "FREE", "INITIALIZE", "REPLACE",
"REMOVE", "RESILVER", "SCRUB", "TRIM"};
int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];
/* Calculate the width of each column */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
/*
* Make sure we have enough space in the col for pretty-printed
* numbers and for the column header, and then leave a couple
* spaces between cols for readability.
*/
col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
}
/* Print header if appropriate */
int term_height = terminal_height();
boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
row % (term_height-1) == 0);
if (!wd->wd_scripted && (row == 0 || reprint_header)) {
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
if (wd->wd_enabled[i])
(void) printf("%*s", col_widths[i], headers[i]);
}
(void) printf("\n");
}
/* Bytes of work remaining in each activity */
int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};
bytes_rem[ZPOOL_WAIT_FREE] =
zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);
config = zpool_get_config(zhp, NULL);
nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
if (prs != NULL && prs->prs_state == DSS_SCANNING)
bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
prs->prs_copied;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
if (pss != NULL && pss->pss_state == DSS_SCANNING &&
pss->pss_pass_scrub_pause == 0) {
int64_t rem = pss->pss_to_examine - pss->pss_issued;
if (pss->pss_func == POOL_SCAN_SCRUB)
bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
else
bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
} else if (check_rebuilding(nvroot, NULL)) {
bytes_rem[ZPOOL_WAIT_RESILVER] =
vdev_activity_top_remaining(nvroot);
}
bytes_rem[ZPOOL_WAIT_INITIALIZE] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE);
bytes_rem[ZPOOL_WAIT_TRIM] =
vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
/*
* A replace finishes after resilvering finishes, so the amount of work
* left for a replace is the same as for resilvering.
*
* It isn't quite correct to say that if we have any 'spare' or
* 'replacing' vdevs and a resilver is happening, then a replace is in
* progress, like we do here. When a hot spare is used, the faulted vdev
* is not removed after the hot spare is resilvered, so the parent 'spare'
* vdev is not removed either. So we could have a 'spare' vdev, but be
* resilvering for a different reason. However, we use it as a heuristic
* because we don't have access to the DTLs, which could tell us whether
* or not we have really finished resilvering a hot spare.
*/
if (vdev_any_spare_replacing(nvroot))
bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
char buf[64];
if (!wd->wd_enabled[i])
continue;
if (wd->wd_exact)
(void) snprintf(buf, sizeof (buf), "%" PRIi64,
bytes_rem[i]);
else
zfs_nicenum(bytes_rem[i], buf, sizeof (buf));
if (wd->wd_scripted)
(void) printf(i == 0 ? "%s" : "\t%s", buf);
else
(void) printf(" %*s", col_widths[i] - 1, buf);
}
(void) printf("\n");
(void) fflush(stdout);
}
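/*
* Worker thread for 'zpool wait' verbose mode: periodically refresh the
* pool's stats and print a progress row until signaled to exit or the
* pool disappears.
*/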
static void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
int ret = 0;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
pthread_mutex_lock(&wd->wd_mutex);
if (!wd->wd_should_exit)
ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex,
&timeout);
pthread_mutex_unlock(&wd->wd_mutex);
if (ret == 0) {
break; /* signaled by main thread */
} else if (ret != ETIMEDOUT) {
(void) fprintf(stderr, gettext("pthread_cond_timedwait "
"failed: %s\n"), strerror(ret));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
int
zpool_do_wait(int argc, char **argv)
{
boolean_t verbose = B_FALSE;
int c;
char *value;
int i;
unsigned long count;
pthread_t status_thr;
int error = 0;
zpool_handle_t *zhp;
wait_data_t wd;
wd.wd_scripted = B_FALSE;
wd.wd_exact = B_FALSE;
wd.wd_headers_once = B_FALSE;
wd.wd_should_exit = B_FALSE;
pthread_mutex_init(&wd.wd_mutex, NULL);
pthread_cond_init(&wd.wd_cv, NULL);
/* By default, wait for all types of activity. */
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
wd.wd_enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "HpT:t:")) != -1) {
switch (c) {
case 'H':
wd.wd_scripted = B_TRUE;
break;
case 'n':
wd.wd_headers_once = B_TRUE;
break;
case 'p':
wd.wd_exact = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 't':
{
static char *col_subopts[] = { "discard", "free",
"initialize", "replace", "remove", "resilver",
"scrub", "trim", NULL };
/* Reset activities array */
bzero(&wd.wd_enabled, sizeof (wd.wd_enabled));
while (*optarg != '\0') {
int activity = getsubopt(&optarg, col_subopts,
&value);
if (activity < 0) {
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"),
value);
usage(B_FALSE);
}
wd.wd_enabled[activity] = B_TRUE;
}
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &wd.wd_interval, &count);
if (count != 0) {
/* This subcmd only accepts an interval, not a count */
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (wd.wd_interval != 0)
verbose = B_TRUE;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
wd.wd_poolname = argv[0];
if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
return (1);
if (verbose) {
/*
* We use a separate thread for printing status updates because
* the main thread will call lzc_wait(), which blocks as long
* as an activity is in progress, which can be a long time.
*/
if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
!= 0) {
(void) fprintf(stderr, gettext("failed to create status"
"thread: %s\n"), strerror(errno));
zpool_close(zhp);
return (1);
}
}
/*
* Loop over all activities that we are supposed to wait for until none
* of them are in progress. Note that this means we can end up waiting
* for more activities to complete than just those that were in progress
* when we began waiting; if an activity we are interested in begins
* while we are waiting for another activity, we will wait for both to
* complete before exiting.
*/
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!wd.wd_enabled[i])
continue;
error = zpool_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zpool_close(zhp);
if (verbose) {
uintptr_t status;
pthread_mutex_lock(&wd.wd_mutex);
wd.wd_should_exit = B_TRUE;
pthread_cond_signal(&wd.wd_cv);
pthread_mutex_unlock(&wd.wd_mutex);
(void) pthread_join(status_thr, (void *)&status);
if (status != 0)
error = status;
}
pthread_mutex_destroy(&wd.wd_mutex);
pthread_cond_destroy(&wd.wd_cv);
return (error);
}
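/*
* Look up 'command' in the command table and return its index via 'idx'.
* Returns 0 on success and 1 if the command is not found.
*/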
static int
find_command_idx(char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
/*
* Display version message
*/
static int
zpool_do_version(int argc, char **argv)
{
if (zfs_version_print() == -1)
return (1);
return (0);
}
/*
* Do zpool_load_compat() and print error message on failure
*/
static zpool_compat_status_t
zpool_do_load_compat(const char *compat, boolean_t *list)
{
char report[1024];
zpool_compat_status_t ret;
ret = zpool_load_compat(compat, list, report, 1024);
switch (ret) {
case ZPOOL_COMPATIBILITY_OK:
break;
case ZPOOL_COMPATIBILITY_NOFILES:
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
(void) fprintf(stderr, "Error: %s\n", report);
break;
case ZPOOL_COMPATIBILITY_WARNTOKEN:
(void) fprintf(stderr, "Warning: %s\n", report);
ret = ZPOOL_COMPATIBILITY_OK;
break;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zpool_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/sys/contrib/openzfs/cmd/zstream/zstream_dump.c b/sys/contrib/openzfs/cmd/zstream/zstream_dump.c
index 45cf7b97a147..04a4986b45d8 100644
--- a/sys/contrib/openzfs/cmd/zstream/zstream_dump.c
+++ b/sys/contrib/openzfs/cmd/zstream/zstream_dump.c
@@ -1,799 +1,812 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Portions Copyright 2012 Martin Matuska <martin@matuska.org>
*/
/*
* Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#include <ctype.h>
#include <libnvpair.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <sys/dmu.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio.h>
#include <zfs_fletcher.h>
#include "zstream.h"
/*
* If dump mode is enabled, the number of bytes to print per line
*/
#define BYTES_PER_LINE 16
/*
* If dump mode is enabled, the number of bytes to group together, separated
* by newlines or spaces
*/
#define DUMP_GROUPING 4
uint64_t total_stream_len = 0;
FILE *send_stream = 0;
boolean_t do_byteswap = B_FALSE;
boolean_t do_cksum = B_TRUE;
static void *
safe_malloc(size_t size)
{
void *rv = malloc(size);
if (rv == NULL) {
(void) fprintf(stderr, "ERROR; failed to allocate %zu bytes\n",
size);
abort();
}
return (rv);
}
/*
* ssread - send stream read.
*
* Read while computing incremental checksum
*/
static size_t
ssread(void *buf, size_t len, zio_cksum_t *cksum)
{
size_t outlen;
if ((outlen = fread(buf, len, 1, send_stream)) == 0)
return (0);
if (do_cksum) {
if (do_byteswap)
fletcher_4_incremental_byteswap(buf, len, cksum);
else
fletcher_4_incremental_native(buf, len, cksum);
}
total_stream_len += len;
return (outlen);
}
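/*
* Read a replay record header, verifying the embedded checksum against
* the running checksum when checksumming is enabled. Returns the number
* of bytes read, or 0 on EOF or checksum mismatch.
*/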
static size_t
read_hdr(dmu_replay_record_t *drr, zio_cksum_t *cksum)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
size_t r = ssread(drr, sizeof (*drr) - sizeof (zio_cksum_t), cksum);
if (r == 0)
return (0);
zio_cksum_t saved_cksum = *cksum;
r = ssread(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), cksum);
if (r == 0)
return (0);
if (do_cksum &&
!ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.drr_checksum.drr_checksum) &&
!ZIO_CHECKSUM_EQUAL(saved_cksum,
drr->drr_u.drr_checksum.drr_checksum)) {
fprintf(stderr, "invalid checksum\n");
(void) printf("Incorrect checksum in record header.\n");
(void) printf("Expected checksum = %llx/%llx/%llx/%llx\n",
(longlong_t)saved_cksum.zc_word[0],
(longlong_t)saved_cksum.zc_word[1],
(longlong_t)saved_cksum.zc_word[2],
(longlong_t)saved_cksum.zc_word[3]);
return (0);
}
return (sizeof (*drr));
}
/*
* Print part of a block in ASCII characters
*/
static void
print_ascii_block(char *subbuf, int length)
{
int i;
for (i = 0; i < length; i++) {
char char_print = isprint(subbuf[i]) ? subbuf[i] : '.';
if (i != 0 && i % DUMP_GROUPING == 0) {
(void) printf(" ");
}
(void) printf("%c", char_print);
}
(void) printf("\n");
}
/*
* print_block - Dump the contents of a modified block to STDOUT
*
* Assume that buf has capacity evenly divisible by BYTES_PER_LINE
*/
static void
print_block(char *buf, int length)
{
int i;
/*
* Start printing ASCII characters at a constant offset, after
* the hex prints. Leave 3 characters per byte on a line (2 digit
* hex number plus 1 space) plus spaces between characters and
* groupings.
*/
int ascii_start = BYTES_PER_LINE * 3 +
BYTES_PER_LINE / DUMP_GROUPING + 2;
for (i = 0; i < length; i += BYTES_PER_LINE) {
int j;
int this_line_length = MIN(BYTES_PER_LINE, length - i);
int print_offset = 0;
for (j = 0; j < this_line_length; j++) {
int buf_offset = i + j;
/*
* Separate every DUMP_GROUPING bytes by a space.
*/
if (buf_offset % DUMP_GROUPING == 0) {
print_offset += printf(" ");
}
/*
* Print the two-digit hex value for this byte.
*/
unsigned char hex_print = buf[buf_offset];
print_offset += printf("%02x ", hex_print);
}
(void) printf("%*s", ascii_start - print_offset, " ");
print_ascii_block(buf + i, this_line_length);
}
}
/*
* Format an array of bytes as hexadecimal characters into str. str must
* have buf_len * 2 + 1 bytes of space.
*/
static void
sprintf_bytes(char *str, uint8_t *buf, uint_t buf_len)
{
int i, n;
for (i = 0; i < buf_len; i++) {
n = sprintf(str, "%02x", buf[i] & 0xff);
str += n;
}
str[0] = '\0';
}
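/*
* zstream dump [-vCd] [file]
*
* Print a human-readable description of each record in a send stream,
* read from the named file or from standard input.
*/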
int
zstream_do_dump(int argc, char *argv[])
{
char *buf = safe_malloc(SPA_MAXBLOCKSIZE);
uint64_t drr_record_count[DRR_NUMTYPES] = { 0 };
uint64_t total_payload_size = 0;
uint64_t total_overhead_size = 0;
uint64_t drr_byte_count[DRR_NUMTYPES] = { 0 };
char salt[ZIO_DATA_SALT_LEN * 2 + 1];
char iv[ZIO_DATA_IV_LEN * 2 + 1];
char mac[ZIO_DATA_MAC_LEN * 2 + 1];
uint64_t total_records = 0;
uint64_t payload_size;
dmu_replay_record_t thedrr;
dmu_replay_record_t *drr = &thedrr;
struct drr_begin *drrb = &thedrr.drr_u.drr_begin;
struct drr_end *drre = &thedrr.drr_u.drr_end;
struct drr_object *drro = &thedrr.drr_u.drr_object;
struct drr_freeobjects *drrfo = &thedrr.drr_u.drr_freeobjects;
struct drr_write *drrw = &thedrr.drr_u.drr_write;
struct drr_write_byref *drrwbr = &thedrr.drr_u.drr_write_byref;
struct drr_free *drrf = &thedrr.drr_u.drr_free;
struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
struct drr_write_embedded *drrwe = &thedrr.drr_u.drr_write_embedded;
struct drr_object_range *drror = &thedrr.drr_u.drr_object_range;
struct drr_redact *drrr = &thedrr.drr_u.drr_redact;
struct drr_checksum *drrc = &thedrr.drr_u.drr_checksum;
int c;
boolean_t verbose = B_FALSE;
boolean_t very_verbose = B_FALSE;
boolean_t first = B_TRUE;
/*
* dump flag controls whether the contents of any modified data blocks
* are printed to the console during processing of the stream. Warning:
* for large streams, this can produce a very large amount of output.
*/
boolean_t dump = B_FALSE;
int err;
zio_cksum_t zc = { { 0 } };
zio_cksum_t pcksum = { { 0 } };
while ((c = getopt(argc, argv, ":vCd")) != -1) {
switch (c) {
case 'C':
do_cksum = B_FALSE;
break;
case 'v':
if (verbose)
very_verbose = B_TRUE;
verbose = B_TRUE;
break;
case 'd':
dump = B_TRUE;
verbose = B_TRUE;
very_verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr,
"missing argument for '%c' option\n", optopt);
zstream_usage();
break;
case '?':
(void) fprintf(stderr, "invalid option '%c'\n",
optopt);
zstream_usage();
break;
}
}
if (argc > optind) {
const char *filename = argv[optind];
send_stream = fopen(filename, "r");
if (send_stream == NULL) {
(void) fprintf(stderr,
"Error while opening file '%s': %s\n",
filename, strerror(errno));
exit(1);
}
} else {
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
"Error: The send stream is a binary format "
"and can not be read from a\n"
"terminal. Standard input must be redirected, "
"or a file must be\n"
"specified as a command-line argument.\n");
exit(1);
}
send_stream = stdin;
}
fletcher_4_init();
while (read_hdr(drr, &zc)) {
+ uint64_t featureflags = 0;
/*
* If this is the first DMU record being processed, check for
* the magic bytes and figure out the endian-ness based on them.
*/
if (first) {
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
do_byteswap = B_TRUE;
if (do_cksum) {
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
/*
* recalculate header checksum now
* that we know it needs to be
* byteswapped.
*/
fletcher_4_incremental_byteswap(drr,
sizeof (dmu_replay_record_t), &zc);
}
} else if (drrb->drr_magic != DMU_BACKUP_MAGIC) {
(void) fprintf(stderr, "Invalid stream "
"(bad magic number)\n");
exit(1);
}
first = B_FALSE;
}
if (do_byteswap) {
drr->drr_type = BSWAP_32(drr->drr_type);
drr->drr_payloadlen =
BSWAP_32(drr->drr_payloadlen);
}
/*
* At this point, the leading fields of the replay record
* (drr_type and drr_payloadlen) have been byte-swapped if
* necessary, but the rest of the data structure (the
* union of type-specific structures) is still in its
* original state.
*/
if (drr->drr_type >= DRR_NUMTYPES) {
(void) printf("INVALID record found: type 0x%x\n",
drr->drr_type);
(void) printf("Aborting.\n");
exit(1);
}
drr_record_count[drr->drr_type]++;
total_overhead_size += sizeof (*drr);
total_records++;
payload_size = 0;
switch (drr->drr_type) {
case DRR_BEGIN:
if (do_byteswap) {
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
drrb->drr_versioninfo =
BSWAP_64(drrb->drr_versioninfo);
drrb->drr_creation_time =
BSWAP_64(drrb->drr_creation_time);
drrb->drr_type = BSWAP_32(drrb->drr_type);
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
drrb->drr_fromguid =
BSWAP_64(drrb->drr_fromguid);
}
+ featureflags =
+ DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
+
(void) printf("BEGIN record\n");
(void) printf("\thdrtype = %lld\n",
DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo));
(void) printf("\tfeatures = %llx\n",
DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo));
(void) printf("\tmagic = %llx\n",
(u_longlong_t)drrb->drr_magic);
(void) printf("\tcreation_time = %llx\n",
(u_longlong_t)drrb->drr_creation_time);
(void) printf("\ttype = %u\n", drrb->drr_type);
(void) printf("\tflags = 0x%x\n", drrb->drr_flags);
(void) printf("\ttoguid = %llx\n",
(u_longlong_t)drrb->drr_toguid);
(void) printf("\tfromguid = %llx\n",
(u_longlong_t)drrb->drr_fromguid);
(void) printf("\ttoname = %s\n", drrb->drr_toname);
(void) printf("\tpayloadlen = %u\n",
drr->drr_payloadlen);
if (verbose)
(void) printf("\n");
if (drr->drr_payloadlen != 0) {
nvlist_t *nv;
int sz = drr->drr_payloadlen;
if (sz > SPA_MAXBLOCKSIZE) {
free(buf);
buf = safe_malloc(sz);
}
(void) ssread(buf, sz, &zc);
if (ferror(send_stream))
perror("fread");
err = nvlist_unpack(buf, sz, &nv, 0);
if (err) {
perror(strerror(err));
} else {
nvlist_print(stdout, nv);
nvlist_free(nv);
}
payload_size = sz;
}
break;
case DRR_END:
if (do_byteswap) {
drre->drr_checksum.zc_word[0] =
BSWAP_64(drre->drr_checksum.zc_word[0]);
drre->drr_checksum.zc_word[1] =
BSWAP_64(drre->drr_checksum.zc_word[1]);
drre->drr_checksum.zc_word[2] =
BSWAP_64(drre->drr_checksum.zc_word[2]);
drre->drr_checksum.zc_word[3] =
BSWAP_64(drre->drr_checksum.zc_word[3]);
}
/*
* We compare against the *previous* checksum
* value, because the stored checksum is of
* everything before the DRR_END record.
*/
if (do_cksum && !ZIO_CHECKSUM_EQUAL(drre->drr_checksum,
pcksum)) {
(void) printf("Expected checksum differs from "
"checksum in stream.\n");
(void) printf("Expected checksum = "
"%llx/%llx/%llx/%llx\n",
(long long unsigned int)pcksum.zc_word[0],
(long long unsigned int)pcksum.zc_word[1],
(long long unsigned int)pcksum.zc_word[2],
(long long unsigned int)pcksum.zc_word[3]);
}
(void) printf("END checksum = %llx/%llx/%llx/%llx\n",
(long long unsigned int)
drre->drr_checksum.zc_word[0],
(long long unsigned int)
drre->drr_checksum.zc_word[1],
(long long unsigned int)
drre->drr_checksum.zc_word[2],
(long long unsigned int)
drre->drr_checksum.zc_word[3]);
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
break;
case DRR_OBJECT:
if (do_byteswap) {
drro->drr_object = BSWAP_64(drro->drr_object);
drro->drr_type = BSWAP_32(drro->drr_type);
drro->drr_bonustype =
BSWAP_32(drro->drr_bonustype);
drro->drr_blksz = BSWAP_32(drro->drr_blksz);
drro->drr_bonuslen =
BSWAP_32(drro->drr_bonuslen);
drro->drr_raw_bonuslen =
BSWAP_32(drro->drr_raw_bonuslen);
drro->drr_toguid = BSWAP_64(drro->drr_toguid);
drro->drr_maxblkid =
BSWAP_64(drro->drr_maxblkid);
}
+ if (featureflags & DMU_BACKUP_FEATURE_RAW &&
+ drro->drr_bonuslen > drro->drr_raw_bonuslen) {
+ (void) fprintf(stderr,
+ "Warning: Object %llu has bonuslen = "
+ "%u > raw_bonuslen = %u\n\n",
+ (u_longlong_t)drro->drr_object,
+ drro->drr_bonuslen, drro->drr_raw_bonuslen);
+ }
+
payload_size = DRR_OBJECT_PAYLOAD_SIZE(drro);
if (verbose) {
(void) printf("OBJECT object = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u "
"dn_slots = %u raw_bonuslen = %u "
"flags = %u maxblkid = %llu "
"indblkshift = %u nlevels = %u "
"nblkptr = %u\n",
(u_longlong_t)drro->drr_object,
drro->drr_type,
drro->drr_bonustype,
drro->drr_blksz,
drro->drr_bonuslen,
drro->drr_dn_slots,
drro->drr_raw_bonuslen,
drro->drr_flags,
(u_longlong_t)drro->drr_maxblkid,
drro->drr_indblkshift,
drro->drr_nlevels,
drro->drr_nblkptr);
}
if (drro->drr_bonuslen > 0) {
(void) ssread(buf, payload_size, &zc);
if (dump)
print_block(buf, payload_size);
}
break;
case DRR_FREEOBJECTS:
if (do_byteswap) {
drrfo->drr_firstobj =
BSWAP_64(drrfo->drr_firstobj);
drrfo->drr_numobjs =
BSWAP_64(drrfo->drr_numobjs);
drrfo->drr_toguid = BSWAP_64(drrfo->drr_toguid);
}
if (verbose) {
(void) printf("FREEOBJECTS firstobj = %llu "
"numobjs = %llu\n",
(u_longlong_t)drrfo->drr_firstobj,
(u_longlong_t)drrfo->drr_numobjs);
}
break;
case DRR_WRITE:
if (do_byteswap) {
drrw->drr_object = BSWAP_64(drrw->drr_object);
drrw->drr_type = BSWAP_32(drrw->drr_type);
drrw->drr_offset = BSWAP_64(drrw->drr_offset);
drrw->drr_logical_size =
BSWAP_64(drrw->drr_logical_size);
drrw->drr_toguid = BSWAP_64(drrw->drr_toguid);
drrw->drr_key.ddk_prop =
BSWAP_64(drrw->drr_key.ddk_prop);
drrw->drr_compressed_size =
BSWAP_64(drrw->drr_compressed_size);
}
payload_size = DRR_WRITE_PAYLOAD_SIZE(drrw);
/*
* If this is verbose and/or dump output,
* print info on the modified block
*/
if (verbose) {
sprintf_bytes(salt, drrw->drr_salt,
ZIO_DATA_SALT_LEN);
sprintf_bytes(iv, drrw->drr_iv,
ZIO_DATA_IV_LEN);
sprintf_bytes(mac, drrw->drr_mac,
ZIO_DATA_MAC_LEN);
(void) printf("WRITE object = %llu type = %u "
"checksum type = %u compression type = %u "
"flags = %u offset = %llu "
"logical_size = %llu "
"compressed_size = %llu "
"payload_size = %llu props = %llx "
"salt = %s iv = %s mac = %s\n",
(u_longlong_t)drrw->drr_object,
drrw->drr_type,
drrw->drr_checksumtype,
drrw->drr_compressiontype,
drrw->drr_flags,
(u_longlong_t)drrw->drr_offset,
(u_longlong_t)drrw->drr_logical_size,
(u_longlong_t)drrw->drr_compressed_size,
(u_longlong_t)payload_size,
(u_longlong_t)drrw->drr_key.ddk_prop,
salt,
iv,
mac);
}
/*
* Read the contents of the block in from STDIN to buf
*/
(void) ssread(buf, payload_size, &zc);
/*
* If in dump mode
*/
if (dump) {
print_block(buf, payload_size);
}
break;
case DRR_WRITE_BYREF:
if (do_byteswap) {
drrwbr->drr_object =
BSWAP_64(drrwbr->drr_object);
drrwbr->drr_offset =
BSWAP_64(drrwbr->drr_offset);
drrwbr->drr_length =
BSWAP_64(drrwbr->drr_length);
drrwbr->drr_toguid =
BSWAP_64(drrwbr->drr_toguid);
drrwbr->drr_refguid =
BSWAP_64(drrwbr->drr_refguid);
drrwbr->drr_refobject =
BSWAP_64(drrwbr->drr_refobject);
drrwbr->drr_refoffset =
BSWAP_64(drrwbr->drr_refoffset);
drrwbr->drr_key.ddk_prop =
BSWAP_64(drrwbr->drr_key.ddk_prop);
}
if (verbose) {
(void) printf("WRITE_BYREF object = %llu "
"checksum type = %u props = %llx "
"offset = %llu length = %llu "
"toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu\n",
(u_longlong_t)drrwbr->drr_object,
drrwbr->drr_checksumtype,
(u_longlong_t)drrwbr->drr_key.ddk_prop,
(u_longlong_t)drrwbr->drr_offset,
(u_longlong_t)drrwbr->drr_length,
(u_longlong_t)drrwbr->drr_toguid,
(u_longlong_t)drrwbr->drr_refguid,
(u_longlong_t)drrwbr->drr_refobject,
(u_longlong_t)drrwbr->drr_refoffset);
}
break;
case DRR_FREE:
if (do_byteswap) {
drrf->drr_object = BSWAP_64(drrf->drr_object);
drrf->drr_offset = BSWAP_64(drrf->drr_offset);
drrf->drr_length = BSWAP_64(drrf->drr_length);
}
if (verbose) {
(void) printf("FREE object = %llu "
"offset = %llu length = %lld\n",
(u_longlong_t)drrf->drr_object,
(u_longlong_t)drrf->drr_offset,
(longlong_t)drrf->drr_length);
}
break;
case DRR_SPILL:
if (do_byteswap) {
drrs->drr_object = BSWAP_64(drrs->drr_object);
drrs->drr_length = BSWAP_64(drrs->drr_length);
drrs->drr_compressed_size =
BSWAP_64(drrs->drr_compressed_size);
drrs->drr_type = BSWAP_32(drrs->drr_type);
}
payload_size = DRR_SPILL_PAYLOAD_SIZE(drrs);
if (verbose) {
sprintf_bytes(salt, drrs->drr_salt,
ZIO_DATA_SALT_LEN);
sprintf_bytes(iv, drrs->drr_iv,
ZIO_DATA_IV_LEN);
sprintf_bytes(mac, drrs->drr_mac,
ZIO_DATA_MAC_LEN);
(void) printf("SPILL block for object = %llu "
"length = %llu flags = %u "
"compression type = %u "
"compressed_size = %llu "
"payload_size = %llu "
"salt = %s iv = %s mac = %s\n",
(u_longlong_t)drrs->drr_object,
(u_longlong_t)drrs->drr_length,
drrs->drr_flags,
drrs->drr_compressiontype,
(u_longlong_t)drrs->drr_compressed_size,
(u_longlong_t)payload_size,
salt,
iv,
mac);
}
(void) ssread(buf, payload_size, &zc);
if (dump) {
print_block(buf, payload_size);
}
break;
case DRR_WRITE_EMBEDDED:
if (do_byteswap) {
drrwe->drr_object =
BSWAP_64(drrwe->drr_object);
drrwe->drr_offset =
BSWAP_64(drrwe->drr_offset);
drrwe->drr_length =
BSWAP_64(drrwe->drr_length);
drrwe->drr_toguid =
BSWAP_64(drrwe->drr_toguid);
drrwe->drr_lsize =
BSWAP_32(drrwe->drr_lsize);
drrwe->drr_psize =
BSWAP_32(drrwe->drr_psize);
}
if (verbose) {
(void) printf("WRITE_EMBEDDED object = %llu "
"offset = %llu length = %llu "
"toguid = %llx comp = %u etype = %u "
"lsize = %u psize = %u\n",
(u_longlong_t)drrwe->drr_object,
(u_longlong_t)drrwe->drr_offset,
(u_longlong_t)drrwe->drr_length,
(u_longlong_t)drrwe->drr_toguid,
drrwe->drr_compression,
drrwe->drr_etype,
drrwe->drr_lsize,
drrwe->drr_psize);
}
(void) ssread(buf,
P2ROUNDUP(drrwe->drr_psize, 8), &zc);
if (dump) {
print_block(buf,
P2ROUNDUP(drrwe->drr_psize, 8));
}
payload_size = P2ROUNDUP(drrwe->drr_psize, 8);
break;
case DRR_OBJECT_RANGE:
if (do_byteswap) {
drror->drr_firstobj =
BSWAP_64(drror->drr_firstobj);
drror->drr_numslots =
BSWAP_64(drror->drr_numslots);
drror->drr_toguid = BSWAP_64(drror->drr_toguid);
}
if (verbose) {
sprintf_bytes(salt, drror->drr_salt,
ZIO_DATA_SALT_LEN);
sprintf_bytes(iv, drror->drr_iv,
ZIO_DATA_IV_LEN);
sprintf_bytes(mac, drror->drr_mac,
ZIO_DATA_MAC_LEN);
(void) printf("OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u "
"salt = %s iv = %s mac = %s\n",
(u_longlong_t)drror->drr_firstobj,
(u_longlong_t)drror->drr_numslots,
drror->drr_flags,
salt,
iv,
mac);
}
break;
case DRR_REDACT:
if (do_byteswap) {
drrr->drr_object = BSWAP_64(drrr->drr_object);
drrr->drr_offset = BSWAP_64(drrr->drr_offset);
drrr->drr_length = BSWAP_64(drrr->drr_length);
drrr->drr_toguid = BSWAP_64(drrr->drr_toguid);
}
if (verbose) {
(void) printf("REDACT object = %llu offset = "
"%llu length = %llu\n",
(u_longlong_t)drrr->drr_object,
(u_longlong_t)drrr->drr_offset,
(u_longlong_t)drrr->drr_length);
}
break;
case DRR_NUMTYPES:
/* should never be reached */
exit(1);
}
if (drr->drr_type != DRR_BEGIN && very_verbose) {
(void) printf(" checksum = %llx/%llx/%llx/%llx\n",
(longlong_t)drrc->drr_checksum.zc_word[0],
(longlong_t)drrc->drr_checksum.zc_word[1],
(longlong_t)drrc->drr_checksum.zc_word[2],
(longlong_t)drrc->drr_checksum.zc_word[3]);
}
pcksum = zc;
drr_byte_count[drr->drr_type] += payload_size;
total_payload_size += payload_size;
}
free(buf);
fletcher_4_fini();
/* Print final summary */
(void) printf("SUMMARY:\n");
(void) printf("\tTotal DRR_BEGIN records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_BEGIN],
(u_longlong_t)drr_byte_count[DRR_BEGIN]);
(void) printf("\tTotal DRR_END records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_END],
(u_longlong_t)drr_byte_count[DRR_END]);
(void) printf("\tTotal DRR_OBJECT records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_OBJECT],
(u_longlong_t)drr_byte_count[DRR_OBJECT]);
(void) printf("\tTotal DRR_FREEOBJECTS records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_FREEOBJECTS],
(u_longlong_t)drr_byte_count[DRR_FREEOBJECTS]);
(void) printf("\tTotal DRR_WRITE records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_WRITE],
(u_longlong_t)drr_byte_count[DRR_WRITE]);
(void) printf("\tTotal DRR_WRITE_BYREF records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_WRITE_BYREF],
(u_longlong_t)drr_byte_count[DRR_WRITE_BYREF]);
(void) printf("\tTotal DRR_WRITE_EMBEDDED records = %lld (%llu "
"bytes)\n", (u_longlong_t)drr_record_count[DRR_WRITE_EMBEDDED],
(u_longlong_t)drr_byte_count[DRR_WRITE_EMBEDDED]);
(void) printf("\tTotal DRR_FREE records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_FREE],
(u_longlong_t)drr_byte_count[DRR_FREE]);
(void) printf("\tTotal DRR_SPILL records = %lld (%llu bytes)\n",
(u_longlong_t)drr_record_count[DRR_SPILL],
(u_longlong_t)drr_byte_count[DRR_SPILL]);
(void) printf("\tTotal records = %lld\n",
(u_longlong_t)total_records);
(void) printf("\tTotal payload size = %lld (0x%llx)\n",
(u_longlong_t)total_payload_size, (u_longlong_t)total_payload_size);
(void) printf("\tTotal header overhead = %lld (0x%llx)\n",
(u_longlong_t)total_overhead_size,
(u_longlong_t)total_overhead_size);
(void) printf("\tTotal stream length = %lld (0x%llx)\n",
(u_longlong_t)total_stream_len, (u_longlong_t)total_stream_len);
return (0);
}
diff --git a/sys/contrib/openzfs/config/Substfiles.am b/sys/contrib/openzfs/config/Substfiles.am
index 63697bfa2b6a..911903e10e69 100644
--- a/sys/contrib/openzfs/config/Substfiles.am
+++ b/sys/contrib/openzfs/config/Substfiles.am
@@ -1,34 +1,36 @@
subst_sed_cmd = \
-e 's|@bindir[@]|$(bindir)|g' \
-e 's|@sbindir[@]|$(sbindir)|g' \
-e 's|@datadir[@]|$(datadir)|g' \
-e 's|@sysconfdir[@]|$(sysconfdir)|g' \
-e 's|@runstatedir[@]|$(runstatedir)|g' \
-e 's|@initconfdir[@]|$(initconfdir)|g' \
-e 's|@initdir[@]|$(initdir)|g' \
-e 's|@mounthelperdir[@]|$(mounthelperdir)|g' \
-e 's|@systemdgeneratordir[@]|$(systemdgeneratordir)|g' \
-e 's|@systemdunitdir[@]|$(systemdunitdir)|g' \
-e 's|@udevdir[@]|$(udevdir)|g' \
-e 's|@udevruledir[@]|$(udevruledir)|g' \
-e 's|@zfsexecdir[@]|$(zfsexecdir)|g' \
-e 's|@PYTHON[@]|$(PYTHON)|g' \
-e 's|@PYTHON_SHEBANG[@]|$(PYTHON_SHEBANG)|g' \
-e 's|@DEFAULT_INIT_NFS_SERVER[@]|$(DEFAULT_INIT_NFS_SERVER)|g' \
- -e 's|@DEFAULT_INIT_SHELL[@]|$(DEFAULT_INIT_SHELL)|g'
+ -e 's|@DEFAULT_INIT_SHELL[@]|$(DEFAULT_INIT_SHELL)|g' \
+ -e 's|@LIBFETCH_DYNAMIC[@]|$(LIBFETCH_DYNAMIC)|g' \
+ -e 's|@LIBFETCH_SONAME[@]|$(LIBFETCH_SONAME)|g'
SUBSTFILES =
CLEANFILES = $(SUBSTFILES)
EXTRA_DIST = $(SUBSTFILES:=.in)
$(SUBSTFILES):%:%.in Makefile
$(AM_V_GEN)set -e; \
$(MKDIR_P) $$(dirname $@); \
$(RM) $@~; \
$(SED) $(subst_sed_cmd) $< >$@~; \
if grep -E '@[a-zA-Z0-9_]+@' $@~ >&2; then \
echo "Undefined substitution" >&2; \
exit 1; \
else test $$? -eq 1; fi; \
test -x $< && chmod +x $@~; \
mv -f $@~ $@
diff --git a/sys/contrib/openzfs/config/always-python.m4 b/sys/contrib/openzfs/config/always-python.m4
index 76b06fcd8488..5f47df424c27 100644
--- a/sys/contrib/openzfs/config/always-python.m4
+++ b/sys/contrib/openzfs/config/always-python.m4
@@ -1,70 +1,70 @@
dnl #
dnl # The majority of the python scripts are written to be compatible
dnl # with Python 2.6 and Python 3.4. Therefore, they may be installed
dnl # and used with either interpreter. This option is intended
dnl # to provide a method to specify the default system version, and
dnl # set the PYTHON environment variable accordingly.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYTHON], [
AC_ARG_WITH([python],
AS_HELP_STRING([--with-python[=VERSION]],
[default system python version @<:@default=check@:>@]),
[with_python=$withval],
[with_python=check])
AS_CASE([$with_python],
[check], [AC_CHECK_PROGS([PYTHON], [python3 python2], [:])],
[2*], [PYTHON="python${with_python}"],
[*python2*], [PYTHON="${with_python}"],
[3*], [PYTHON="python${with_python}"],
[*python3*], [PYTHON="${with_python}"],
[no], [PYTHON=":"],
[AC_MSG_ERROR([Unknown --with-python value '$with_python'])]
)
dnl #
dnl # Minimum supported Python versions for utilities:
dnl # Python 2.6 or Python 3.4
dnl #
AM_PATH_PYTHON([], [], [:])
AS_IF([test -z "$PYTHON_VERSION"], [
- PYTHON_VERSION=$(basename $PYTHON | tr -cd 0-9.)
+ PYTHON_VERSION=$(echo ${PYTHON##*/} | tr -cd 0-9.)
])
PYTHON_MINOR=${PYTHON_VERSION#*\.}
AS_CASE([$PYTHON_VERSION],
[2.*], [
AS_IF([test $PYTHON_MINOR -lt 6],
[AC_MSG_ERROR("Python >= 2.6 is required")])
],
[3.*], [
AS_IF([test $PYTHON_MINOR -lt 4],
[AC_MSG_ERROR("Python >= 3.4 is required")])
],
[:|2|3], [],
[PYTHON_VERSION=3]
)
AM_CONDITIONAL([USING_PYTHON], [test "$PYTHON" != :])
AM_CONDITIONAL([USING_PYTHON_2], [test "x${PYTHON_VERSION%%\.*}" = x2])
AM_CONDITIONAL([USING_PYTHON_3], [test "x${PYTHON_VERSION%%\.*}" = x3])
AM_COND_IF([USING_PYTHON_2],
[AC_SUBST([PYTHON_SHEBANG], [python2])],
[AC_SUBST([PYTHON_SHEBANG], [python3])])
dnl #
dnl # Request that packages be built for a specific Python version.
dnl #
AS_IF([test "x$with_python" != xcheck], [
PYTHON_PKG_VERSION=$(echo $PYTHON_VERSION | tr -d .)
DEFINE_PYTHON_PKG_VERSION='--define "__use_python_pkg_version '${PYTHON_PKG_VERSION}'"'
DEFINE_PYTHON_VERSION='--define "__use_python '${PYTHON}'"'
], [
DEFINE_PYTHON_VERSION=''
DEFINE_PYTHON_PKG_VERSION=''
])
AC_SUBST(DEFINE_PYTHON_VERSION)
AC_SUBST(DEFINE_PYTHON_PKG_VERSION)
])
diff --git a/sys/contrib/openzfs/config/always-pyzfs.m4 b/sys/contrib/openzfs/config/always-pyzfs.m4
index fa39fd88519c..00e5d0e2cbbd 100644
--- a/sys/contrib/openzfs/config/always-pyzfs.m4
+++ b/sys/contrib/openzfs/config/always-pyzfs.m4
@@ -1,120 +1,120 @@
dnl #
dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false])
dnl #
dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE
dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html
dnl # Required by ZFS_AC_CONFIG_ALWAYS_PYZFS.
dnl #
AC_DEFUN([ZFS_AC_PYTHON_MODULE], [
- PYTHON_NAME=$(basename $PYTHON)
+ PYTHON_NAME=${PYTHON##*/}
AC_MSG_CHECKING([for $PYTHON_NAME module: $1])
AS_IF([$PYTHON -c "import $1" 2>/dev/null], [
AC_MSG_RESULT(yes)
m4_ifvaln([$2], [$2])
], [
AC_MSG_RESULT(no)
m4_ifvaln([$3], [$3])
])
])
dnl #
dnl # Determines if pyzfs can be built, requires Python 2.7 or later.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [
AC_ARG_ENABLE([pyzfs],
AS_HELP_STRING([--enable-pyzfs],
[install libzfs_core python bindings @<:@default=check@:>@]),
[enable_pyzfs=$enableval],
[enable_pyzfs=check])
dnl #
dnl # Packages for pyzfs specifically enabled/disabled.
dnl #
AS_IF([test "x$enable_pyzfs" != xcheck], [
AS_IF([test "x$enable_pyzfs" = xyes], [
DEFINE_PYZFS='--with pyzfs'
], [
DEFINE_PYZFS='--without pyzfs'
])
], [
AS_IF([test "$PYTHON" != :], [
DEFINE_PYZFS=''
], [
enable_pyzfs=no
DEFINE_PYZFS='--without pyzfs'
])
])
AC_SUBST(DEFINE_PYZFS)
dnl #
dnl # Python "packaging" (or, failing that, "distlib") module is required to build and install pyzfs
dnl #
AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
ZFS_AC_PYTHON_MODULE([packaging], [], [
ZFS_AC_PYTHON_MODULE([distlib], [], [
AS_IF([test "x$enable_pyzfs" = xyes], [
AC_MSG_ERROR("Python $PYTHON_VERSION packaging and distlib modules are not installed")
], [test "x$enable_pyzfs" != xno], [
enable_pyzfs=no
])
])
])
])
dnl #
dnl # Require python-devel libraries
dnl #
AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
AS_CASE([$PYTHON_VERSION],
[3.*], [PYTHON_REQUIRED_VERSION=">= '3.4.0'"],
[2.*], [PYTHON_REQUIRED_VERSION=">= '2.7.0'"],
[AC_MSG_ERROR("Python $PYTHON_VERSION unknown")]
)
AX_PYTHON_DEVEL([$PYTHON_REQUIRED_VERSION], [
AS_IF([test "x$enable_pyzfs" = xyes], [
AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION development library is not installed")
], [test "x$enable_pyzfs" != xno], [
enable_pyzfs=no
])
])
])
dnl #
dnl # Python "setuptools" module is required to build and install pyzfs
dnl #
AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
ZFS_AC_PYTHON_MODULE([setuptools], [], [
AS_IF([test "x$enable_pyzfs" = xyes], [
AC_MSG_ERROR("Python $PYTHON_VERSION setuptools is not installed")
], [test "x$enable_pyzfs" != xno], [
enable_pyzfs=no
])
])
])
dnl #
dnl # Python "cffi" module is required to run pyzfs
dnl #
AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [
ZFS_AC_PYTHON_MODULE([cffi], [], [
AS_IF([test "x$enable_pyzfs" = xyes], [
AC_MSG_ERROR("Python $PYTHON_VERSION cffi is not installed")
], [test "x$enable_pyzfs" != xno], [
enable_pyzfs=no
])
])
])
dnl #
dnl # Set enable_pyzfs to 'yes' if every check passed
dnl #
AS_IF([test "x$enable_pyzfs" = xcheck], [enable_pyzfs=yes])
AM_CONDITIONAL([PYZFS_ENABLED], [test "x$enable_pyzfs" = xyes])
AC_SUBST([PYZFS_ENABLED], [$enable_pyzfs])
AC_SUBST(pythonsitedir, [$PYTHON_SITE_PKG])
AC_MSG_CHECKING([whether to enable pyzfs: ])
AC_MSG_RESULT($enable_pyzfs)
])
diff --git a/sys/contrib/openzfs/config/kernel-add-disk.m4 b/sys/contrib/openzfs/config/kernel-add-disk.m4
new file mode 100644
index 000000000000..5d1779eb4328
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-add-disk.m4
@@ -0,0 +1,26 @@
+dnl #
+dnl # 5.16 API change
+dnl # add_disk grew a must-check return code
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_ADD_DISK], [
+
+ ZFS_LINUX_TEST_SRC([add_disk_ret], [
+ #include <linux/genhd.h>
+ ], [
+ struct gendisk *disk = NULL;
+ int err = add_disk(disk);
+ err = err;
+ ])
+
+])
+AC_DEFUN([ZFS_AC_KERNEL_ADD_DISK], [
+ AC_MSG_CHECKING([whether add_disk() returns int])
+ ZFS_LINUX_TEST_RESULT([add_disk_ret],
+ [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_ADD_DISK_RET, 1,
+ [add_disk() returns int])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
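For reference, a minimal consumer-side sketch of the HAVE_ADD_DISK_RET define produced by the check above; the function name and error handling are illustrative and are not code from this patch.

/*
 * Illustrative only: HAVE_ADD_DISK_RET comes from the configure check
 * above (zfs_config.h in a real build).
 */
#include <linux/blkdev.h>	/* pulls in the gendisk/add_disk API */

static int
example_add_disk(struct gendisk *disk)
{
#ifdef HAVE_ADD_DISK_RET
	/* 5.16+: add_disk() returns an int that must be checked. */
	return (add_disk(disk));
#else
	/* Older kernels: add_disk() returns void and cannot fail here. */
	add_disk(disk);
	return (0);
#endif
}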
diff --git a/sys/contrib/openzfs/config/kernel-fallocate.m4 b/sys/contrib/openzfs/config/kernel-fallocate.m4
index 7a8550f7e760..815602d3e2c6 100644
--- a/sys/contrib/openzfs/config/kernel-fallocate.m4
+++ b/sys/contrib/openzfs/config/kernel-fallocate.m4
@@ -1,27 +1,44 @@
dnl #
dnl # Linux 2.6.38 - 3.x API
dnl # The fallocate callback was moved from the inode_operations
dnl # structure to the file_operations structure.
dnl #
+dnl #
+dnl # Linux 3.15+
+dnl # fallocate learned a new flag, FALLOC_FL_ZERO_RANGE
+dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_FALLOCATE], [
ZFS_LINUX_TEST_SRC([file_fallocate], [
#include <linux/fs.h>
long test_fallocate(struct file *file, int mode,
loff_t offset, loff_t len) { return 0; }
static const struct file_operations
fops __attribute__ ((unused)) = {
.fallocate = test_fallocate,
};
], [])
+ ZFS_LINUX_TEST_SRC([falloc_fl_zero_range], [
+ #include <linux/falloc.h>
+ ],[
+ int flags __attribute__ ((unused));
+ flags = FALLOC_FL_ZERO_RANGE;
+ ])
])
AC_DEFUN([ZFS_AC_KERNEL_FALLOCATE], [
AC_MSG_CHECKING([whether fops->fallocate() exists])
ZFS_LINUX_TEST_RESULT([file_fallocate], [
AC_MSG_RESULT(yes)
+ AC_MSG_CHECKING([whether FALLOC_FL_ZERO_RANGE exists])
+ ZFS_LINUX_TEST_RESULT([falloc_fl_zero_range], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_FALLOC_FL_ZERO_RANGE, 1, [FALLOC_FL_ZERO_RANGE is defined])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
],[
ZFS_LINUX_TEST_ERROR([file_fallocate])
])
])
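For reference, a sketch of how an fallocate handler might consume the new HAVE_FALLOC_FL_ZERO_RANGE define; the supported-mode mask and function name below are illustrative and do not claim to match what the module's actual fallocate callback accepts.

/*
 * Illustrative only: advertise FALLOC_FL_ZERO_RANGE support only when
 * the kernel headers define it (Linux 3.15+).
 */
#include <linux/fs.h>
#include <linux/falloc.h>

static long
example_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	int supported = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;

#ifdef HAVE_FALLOC_FL_ZERO_RANGE
	supported |= FALLOC_FL_ZERO_RANGE;
#endif
	if (mode & ~supported)
		return (-EOPNOTSUPP);

	/* ... perform the requested punch/zero operation here ... */
	return (0);
}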
diff --git a/sys/contrib/openzfs/config/kernel-fpu.m4 b/sys/contrib/openzfs/config/kernel-fpu.m4
index faa64f1ec46b..7f8b028d043b 100644
--- a/sys/contrib/openzfs/config/kernel-fpu.m4
+++ b/sys/contrib/openzfs/config/kernel-fpu.m4
@@ -1,146 +1,187 @@
-dnl #
+dnl #
dnl # Handle differences in kernel FPU code.
dnl #
dnl # Kernel
dnl # 5.16: XCR code put into asm/fpu/xcr.h
-dnl # HAVE_KERNEL_FPU_XCR_HEADER
+dnl # HAVE_KERNEL_FPU_XCR_HEADER
+dnl #
+dnl # XSTATE_XSAVE and XSTATE_XRESTORE aren't accessible any more
+dnl # HAVE_KERNEL_FPU_XSAVE_INTERNAL
+dnl #
+dnl # 5.11: kernel_fpu_begin() is an inlined function now, so don't check
+dnl # for it inside the kernel symbols.
dnl #
dnl # 5.0: Wrappers have been introduced to save/restore the FPU state.
dnl # This change was made to the 4.19.38 and 4.14.120 LTS kernels.
dnl # HAVE_KERNEL_FPU_INTERNAL
dnl #
dnl # 4.2: Use __kernel_fpu_{begin,end}()
dnl # HAVE_UNDERSCORE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl #
dnl # Pre-4.2: Use kernel_fpu_{begin,end}()
dnl # HAVE_KERNEL_FPU & KERNEL_EXPORTS_X86_FPU
dnl #
dnl # N.B. The header check is performed before all other checks since it
dnl # depends on HAVE_KERNEL_FPU_API_HEADER being set in confdefs.h.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_FPU_HEADER], [
AC_MSG_CHECKING([whether fpu headers are available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <asm/fpu/api.h>
],[
],[
AC_DEFINE(HAVE_KERNEL_FPU_API_HEADER, 1,
[kernel has asm/fpu/api.h])
AC_MSG_RESULT(asm/fpu/api.h)
AC_MSG_CHECKING([whether fpu/xcr header is available])
ZFS_LINUX_TRY_COMPILE([
#include <linux/module.h>
#include <asm/fpu/xcr.h>
],[
],[
AC_DEFINE(HAVE_KERNEL_FPU_XCR_HEADER, 1,
[kernel has asm/fpu/xcr.h])
AC_MSG_RESULT(asm/fpu/xcr.h)
],[
AC_MSG_RESULT(no asm/fpu/xcr.h)
])
],[
AC_MSG_RESULT(i387.h & xcr.h)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_FPU], [
ZFS_LINUX_TEST_SRC([kernel_fpu], [
#include <linux/types.h>
#ifdef HAVE_KERNEL_FPU_API_HEADER
#include <asm/fpu/api.h>
#else
#include <asm/i387.h>
#include <asm/xcr.h>
#endif
], [
kernel_fpu_begin();
kernel_fpu_end();
], [], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([__kernel_fpu], [
#include <linux/types.h>
#ifdef HAVE_KERNEL_FPU_API_HEADER
#include <asm/fpu/api.h>
#else
#include <asm/i387.h>
#include <asm/xcr.h>
#endif
], [
__kernel_fpu_begin();
__kernel_fpu_end();
], [], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([fpu_internal], [
#if defined(__x86_64) || defined(__x86_64__) || \
defined(__i386) || defined(__i386__)
#if !defined(__x86)
#define __x86
#endif
#endif
#if !defined(__x86)
#error Unsupported architecture
#endif
#include <linux/types.h>
#ifdef HAVE_KERNEL_FPU_API_HEADER
#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#else
#include <asm/i387.h>
#include <asm/xcr.h>
#endif
#if !defined(XSTATE_XSAVE)
#error XSTATE_XSAVE not defined
#endif
#if !defined(XSTATE_XRESTORE)
#error XSTATE_XRESTORE not defined
#endif
],[
struct fpu *fpu = &current->thread.fpu;
union fpregs_state *st = &fpu->state;
struct fregs_state *fr __attribute__ ((unused)) = &st->fsave;
struct fxregs_state *fxr __attribute__ ((unused)) = &st->fxsave;
struct xregs_state *xr __attribute__ ((unused)) = &st->xsave;
])
+
+ ZFS_LINUX_TEST_SRC([fpu_xsave_internal], [
+ #include <linux/sched.h>
+ #if defined(__x86_64) || defined(__x86_64__) || \
+ defined(__i386) || defined(__i386__)
+ #if !defined(__x86)
+ #define __x86
+ #endif
+ #endif
+
+ #if !defined(__x86)
+ #error Unsupported architecture
+ #endif
+
+ #include <linux/types.h>
+ #ifdef HAVE_KERNEL_FPU_API_HEADER
+ #include <asm/fpu/api.h>
+ #include <asm/fpu/internal.h>
+ #else
+ #include <asm/i387.h>
+ #include <asm/xcr.h>
+ #endif
+
+ ],[
+ struct fpu *fpu = &current->thread.fpu;
+ union fpregs_state *st = &fpu->fpstate->regs;
+ struct fregs_state *fr __attribute__ ((unused)) = &st->fsave;
+ struct fxregs_state *fxr __attribute__ ((unused)) = &st->fxsave;
+ struct xregs_state *xr __attribute__ ((unused)) = &st->xsave;
+ ])
])
AC_DEFUN([ZFS_AC_KERNEL_FPU], [
dnl #
dnl # Legacy kernel
dnl #
AC_MSG_CHECKING([whether kernel fpu is available])
- ZFS_LINUX_TEST_RESULT_SYMBOL([kernel_fpu_license],
- [kernel_fpu_begin], [arch/x86/kernel/fpu/core.c], [
+ ZFS_LINUX_TEST_RESULT([kernel_fpu_license], [
AC_MSG_RESULT(kernel_fpu_*)
AC_DEFINE(HAVE_KERNEL_FPU, 1,
[kernel has kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1,
[kernel exports FPU functions])
],[
dnl #
dnl # Linux 4.2 kernel
dnl #
ZFS_LINUX_TEST_RESULT_SYMBOL([__kernel_fpu_license],
[__kernel_fpu_begin],
[arch/x86/kernel/fpu/core.c arch/x86/kernel/i387.c], [
AC_MSG_RESULT(__kernel_fpu_*)
AC_DEFINE(HAVE_UNDERSCORE_KERNEL_FPU, 1,
[kernel has __kernel_fpu_* functions])
AC_DEFINE(KERNEL_EXPORTS_X86_FPU, 1,
[kernel exports FPU functions])
],[
ZFS_LINUX_TEST_RESULT([fpu_internal], [
AC_MSG_RESULT(internal)
AC_DEFINE(HAVE_KERNEL_FPU_INTERNAL, 1,
[kernel fpu internal])
],[
+ ZFS_LINUX_TEST_RESULT([fpu_xsave_internal], [
+ AC_MSG_RESULT(internal with internal XSAVE)
+ AC_DEFINE(HAVE_KERNEL_FPU_XSAVE_INTERNAL, 1,
+ [kernel fpu and XSAVE internal])
+ ],[
AC_MSG_RESULT(unavailable)
+ ])
])
])
])
])
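For reference, a sketch of how the FPU results above are typically consumed by an enter wrapper; the wrapper name and comments merely restate the dnl block above and are illustrative, not the module's actual SIMD implementation.

/* Illustrative only: choose an FPU enter path from the configure results. */
#include <linux/types.h>
#ifdef HAVE_KERNEL_FPU_API_HEADER
#include <asm/fpu/api.h>
#else
#include <asm/i387.h>
#endif

static inline void
example_fpu_begin(void)
{
#if defined(HAVE_KERNEL_FPU) && defined(KERNEL_EXPORTS_X86_FPU)
	kernel_fpu_begin();		/* exported kernel_fpu_* API */
#elif defined(HAVE_UNDERSCORE_KERNEL_FPU) && defined(KERNEL_EXPORTS_X86_FPU)
	__kernel_fpu_begin();		/* 4.2-era __kernel_fpu_* API */
#else
	/*
	 * HAVE_KERNEL_FPU_INTERNAL / HAVE_KERNEL_FPU_XSAVE_INTERNAL:
	 * the FPU state is saved and restored by the module itself;
	 * that code is elided here.
	 */
#endif
}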
diff --git a/sys/contrib/openzfs/config/kernel-kmem.m4 b/sys/contrib/openzfs/config/kernel-kmem.m4
index 43f9e72f88d8..03c2a41fbdb2 100644
--- a/sys/contrib/openzfs/config/kernel-kmem.m4
+++ b/sys/contrib/openzfs/config/kernel-kmem.m4
@@ -1,108 +1,109 @@
dnl #
dnl # Enabled by default, it provides a minimal level of memory tracking.
dnl # A total count of bytes allocated is kept for each alloc and free.
dnl # Then, at module unload time, a report is printed to the console
dnl # if memory was leaked.
dnl #
AC_DEFUN([SPL_AC_DEBUG_KMEM], [
AC_ARG_ENABLE([debug-kmem],
[AS_HELP_STRING([--enable-debug-kmem],
[Enable basic kmem accounting @<:@default=no@:>@])],
[],
[enable_debug_kmem=no])
AS_IF([test "x$enable_debug_kmem" = xyes],
[
KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_KMEM"
DEBUG_KMEM="_with_debug_kmem"
AC_DEFINE([DEBUG_KMEM], [1],
[Define to 1 to enable basic kmem accounting])
], [
DEBUG_KMEM="_without_debug_kmem"
])
AC_SUBST(DEBUG_KMEM)
AC_MSG_CHECKING([whether basic kmem accounting is enabled])
AC_MSG_RESULT([$enable_debug_kmem])
])
dnl #
dnl # Disabled by default, it provides detailed memory tracking. This
dnl # feature also requires --enable-debug-kmem to be set. When enabled,
dnl # not only will total bytes be tracked but also the location of every
dnl # alloc and free. When the SPL module is unloaded, a list of all leaked
dnl # addresses and where they were allocated will be dumped to the console.
dnl # Enabling this feature has a significant impact on performance, but it
dnl # makes finding memory leaks straightforward.
dnl #
AC_DEFUN([SPL_AC_DEBUG_KMEM_TRACKING], [
AC_ARG_ENABLE([debug-kmem-tracking],
[AS_HELP_STRING([--enable-debug-kmem-tracking],
[Enable detailed kmem tracking @<:@default=no@:>@])],
[],
[enable_debug_kmem_tracking=no])
AS_IF([test "x$enable_debug_kmem_tracking" = xyes],
[
KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_KMEM_TRACKING"
DEBUG_KMEM_TRACKING="_with_debug_kmem_tracking"
AC_DEFINE([DEBUG_KMEM_TRACKING], [1],
[Define to 1 to enable detailed kmem tracking])
], [
DEBUG_KMEM_TRACKING="_without_debug_kmem_tracking"
])
AC_SUBST(DEBUG_KMEM_TRACKING)
AC_MSG_CHECKING([whether detailed kmem tracking is enabled])
AC_MSG_RESULT([$enable_debug_kmem_tracking])
])
dnl #
dnl # 4.12 API,
dnl # Added kvmalloc allocation strategy
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_KVMALLOC], [
ZFS_LINUX_TEST_SRC([kvmalloc], [
#include <linux/mm.h>
+ #include <linux/slab.h>
],[
void *p __attribute__ ((unused));
p = kvmalloc(0, GFP_KERNEL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_KVMALLOC], [
AC_MSG_CHECKING([whether kvmalloc(ptr, flags) is available])
ZFS_LINUX_TEST_RESULT([kvmalloc], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KVMALLOC, 1, [kvmalloc exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 5.8 API,
dnl # __vmalloc PAGE_KERNEL removal
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
ZFS_LINUX_TEST_SRC([__vmalloc], [
#include <linux/mm.h>
#include <linux/vmalloc.h>
],[
void *p __attribute__ ((unused));
p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
AC_MSG_CHECKING([whether __vmalloc(ptr, flags, pageflags) is available])
ZFS_LINUX_TEST_RESULT([__vmalloc], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
],[
AC_MSG_RESULT(no)
])
])
-
\ No newline at end of file
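For reference, a sketch of the usual consumer of the HAVE_KVMALLOC result detected above; the kmalloc/vmalloc fallback and function name are illustrative rather than the SPL's actual allocator path.

/* Illustrative only: prefer kvmalloc() (4.12+), otherwise fall back. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *
example_alloc(size_t size)
{
#ifdef HAVE_KVMALLOC
	/* Callers free the result with kvfree(). */
	return (kvmalloc(size, GFP_KERNEL));
#else
	void *ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	/* Large or fragmented requests fall back to vmalloc(). */
	return (ptr != NULL ? ptr : vmalloc(size));
#endif
}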
diff --git a/sys/contrib/openzfs/config/kernel-kthread.m4 b/sys/contrib/openzfs/config/kernel-kthread.m4
new file mode 100644
index 000000000000..f5b824d7947a
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-kthread.m4
@@ -0,0 +1,68 @@
+AC_DEFUN([ZFS_AC_KERNEL_KTHREAD_COMPLETE_AND_EXIT], [
+ dnl #
+ dnl # 5.17 API,
+ dnl # cead18552660702a4a46f58e65188fe5f36e9dfe ("exit: Rename complete_and_exit to kthread_complete_and_exit")
+ dnl #
+ dnl # Also moves the definition from include/linux/kernel.h to include/linux/kthread.h
+ dnl #
+ AC_MSG_CHECKING([whether kthread_complete_and_exit() is available])
+ ZFS_LINUX_TEST_RESULT([kthread_complete_and_exit], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(SPL_KTHREAD_COMPLETE_AND_EXIT, kthread_complete_and_exit, [kthread_complete_and_exit() available])
+ ], [
+ AC_MSG_RESULT(no)
+ AC_DEFINE(SPL_KTHREAD_COMPLETE_AND_EXIT, complete_and_exit, [using complete_and_exit() instead])
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_KTHREAD_DEQUEUE_SIGNAL_4ARG], [
+ dnl #
+ dnl # 5.17 API: enum pid_type * as new 4th dequeue_signal() argument,
+ dnl # 5768d8906bc23d512b1a736c1e198aa833a6daa4 ("signal: Requeue signals in the appropriate queue")
+ dnl #
+ dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, kernel_siginfo_t *info);
+ dnl # int dequeue_signal(struct task_struct *task, sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
+ dnl #
+ AC_MSG_CHECKING([whether dequeue_signal() takes 4 arguments])
+ ZFS_LINUX_TEST_RESULT([kthread_dequeue_signal], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DEQUEUE_SIGNAL_4ARG, 1, [dequeue_signal() takes 4 arguments])
+ ], [
+ AC_MSG_RESULT(no)
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_KTHREAD_COMPLETE_AND_EXIT], [
+ ZFS_LINUX_TEST_SRC([kthread_complete_and_exit], [
+ #include <linux/kthread.h>
+ ], [
+ struct completion *completion = NULL;
+ long code = 0;
+
+ kthread_complete_and_exit(completion, code);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_KTHREAD_DEQUEUE_SIGNAL_4ARG], [
+ ZFS_LINUX_TEST_SRC([kthread_dequeue_signal], [
+ #include <linux/sched/signal.h>
+ ], [
+ struct task_struct *task = NULL;
+ sigset_t *mask = NULL;
+ kernel_siginfo_t *info = NULL;
+ enum pid_type *type = NULL;
+ int error __attribute__ ((unused));
+
+ error = dequeue_signal(task, mask, info, type);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_KTHREAD], [
+ ZFS_AC_KERNEL_KTHREAD_COMPLETE_AND_EXIT
+ ZFS_AC_KERNEL_KTHREAD_DEQUEUE_SIGNAL_4ARG
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_SRC_KTHREAD], [
+ ZFS_AC_KERNEL_SRC_KTHREAD_COMPLETE_AND_EXIT
+ ZFS_AC_KERNEL_SRC_KTHREAD_DEQUEUE_SIGNAL_4ARG
+])
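For reference, a sketch showing how the two new results are meant to be consumed: SPL_KTHREAD_COMPLETE_AND_EXIT from the first check and HAVE_DEQUEUE_SIGNAL_4ARG from the second. The function names are illustrative and the locking normally required around dequeue_signal() is elided.

/* Illustrative only: both defines come from the configure checks above. */
#include <linux/kernel.h>	/* complete_and_exit() on pre-5.17 kernels */
#include <linux/kthread.h>	/* kthread_complete_and_exit() on 5.17+ */
#include <linux/completion.h>
#include <linux/sched/signal.h>

static int
example_thread(void *arg)
{
	struct completion *done = arg;

	/* Expands to whichever exit helper this kernel provides. */
	SPL_KTHREAD_COMPLETE_AND_EXIT(done, 0);
}

static int
example_dequeue(struct task_struct *task, sigset_t *mask,
    kernel_siginfo_t *info)
{
#ifdef HAVE_DEQUEUE_SIGNAL_4ARG
	enum pid_type type;

	return (dequeue_signal(task, mask, info, &type));
#else
	return (dequeue_signal(task, mask, info));
#endif
}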
diff --git a/sys/contrib/openzfs/config/kernel-pde-data.m4 b/sys/contrib/openzfs/config/kernel-pde-data.m4
index f866d77a11df..4fc665dfbe2e 100644
--- a/sys/contrib/openzfs/config/kernel-pde-data.m4
+++ b/sys/contrib/openzfs/config/kernel-pde-data.m4
@@ -1,20 +1,22 @@
dnl #
-dnl # 3.10 API change,
-dnl # PDE is replaced by PDE_DATA
+dnl # 5.17 API: PDE_DATA() renamed to pde_data(),
+dnl # 359745d78351c6f5442435f81549f0207ece28aa ("proc: remove PDE_DATA() completely")
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_PDE_DATA], [
ZFS_LINUX_TEST_SRC([pde_data], [
#include <linux/proc_fs.h>
], [
- PDE_DATA(NULL);
+ pde_data(NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_PDE_DATA], [
- AC_MSG_CHECKING([whether PDE_DATA() is available])
- ZFS_LINUX_TEST_RESULT_SYMBOL([pde_data], [PDE_DATA], [], [
+ AC_MSG_CHECKING([whether pde_data() is lowercase])
+ ZFS_LINUX_TEST_RESULT([pde_data], [
AC_MSG_RESULT(yes)
- ],[
- ZFS_LINUX_TEST_ERROR([PDE_DATA])
+ AC_DEFINE(SPL_PDE_DATA, pde_data, [pde_data() is pde_data()])
+ ], [
+ AC_MSG_RESULT(no)
+ AC_DEFINE(SPL_PDE_DATA, PDE_DATA, [pde_data() is PDE_DATA()])
])
])
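For reference, a sketch of how the SPL_PDE_DATA define above keeps callers version-agnostic; the wrapper name is illustrative.

/*
 * Illustrative only: SPL_PDE_DATA expands to pde_data() on 5.17+ kernels
 * and to PDE_DATA() on older ones.
 */
#include <linux/fs.h>
#include <linux/proc_fs.h>

static void *
example_proc_private(struct inode *inode)
{
	/* Recover the pointer registered with proc_create_data(). */
	return (SPL_PDE_DATA(inode));
}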
diff --git a/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4 b/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
index ecdda939f1cf..57f78745a24b 100644
--- a/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
+++ b/sys/contrib/openzfs/config/kernel-vfs-iov_iter.m4
@@ -1,184 +1,202 @@
dnl #
dnl # Check for available iov_iter functionality.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_IOV_ITER], [
ZFS_LINUX_TEST_SRC([iov_iter_types], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
int type __attribute__ ((unused)) =
ITER_IOVEC | ITER_KVEC | ITER_BVEC | ITER_PIPE;
])
ZFS_LINUX_TEST_SRC([iov_iter_advance], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
size_t advance = 512;
iov_iter_advance(&iter, advance);
])
ZFS_LINUX_TEST_SRC([iov_iter_revert], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
size_t revert = 512;
iov_iter_revert(&iter, revert);
])
ZFS_LINUX_TEST_SRC([iov_iter_fault_in_readable], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
size_t size = 512;
int error __attribute__ ((unused));
error = iov_iter_fault_in_readable(&iter, size);
])
+ ZFS_LINUX_TEST_SRC([fault_in_iov_iter_readable], [
+ #include <linux/fs.h>
+ #include <linux/uio.h>
+ ],[
+ struct iov_iter iter = { 0 };
+ size_t size = 512;
+ int error __attribute__ ((unused));
+
+ error = fault_in_iov_iter_readable(&iter, size);
+ ])
+
ZFS_LINUX_TEST_SRC([iov_iter_count], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
size_t bytes __attribute__ ((unused));
bytes = iov_iter_count(&iter);
])
ZFS_LINUX_TEST_SRC([copy_to_iter], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
char buf[512] = { 0 };
size_t size = 512;
size_t bytes __attribute__ ((unused));
bytes = copy_to_iter((const void *)&buf, size, &iter);
])
ZFS_LINUX_TEST_SRC([copy_from_iter], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
char buf[512] = { 0 };
size_t size = 512;
size_t bytes __attribute__ ((unused));
bytes = copy_from_iter((void *)&buf, size, &iter);
])
ZFS_LINUX_TEST_SRC([iov_iter_type], [
#include <linux/fs.h>
#include <linux/uio.h>
],[
struct iov_iter iter = { 0 };
__attribute__((unused)) enum iter_type i = iov_iter_type(&iter);
])
])
AC_DEFUN([ZFS_AC_KERNEL_VFS_IOV_ITER], [
enable_vfs_iov_iter="yes"
AC_MSG_CHECKING([whether iov_iter types are available])
ZFS_LINUX_TEST_RESULT([iov_iter_types], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_TYPES, 1,
[iov_iter types are available])
],[
AC_MSG_RESULT(no)
enable_vfs_iov_iter="no"
])
AC_MSG_CHECKING([whether iov_iter_advance() is available])
ZFS_LINUX_TEST_RESULT([iov_iter_advance], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_ADVANCE, 1,
[iov_iter_advance() is available])
],[
AC_MSG_RESULT(no)
enable_vfs_iov_iter="no"
])
AC_MSG_CHECKING([whether iov_iter_revert() is available])
ZFS_LINUX_TEST_RESULT([iov_iter_revert], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_REVERT, 1,
[iov_iter_revert() is available])
],[
AC_MSG_RESULT(no)
enable_vfs_iov_iter="no"
])
AC_MSG_CHECKING([whether iov_iter_fault_in_readable() is available])
ZFS_LINUX_TEST_RESULT([iov_iter_fault_in_readable], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_FAULT_IN_READABLE, 1,
[iov_iter_fault_in_readable() is available])
],[
- AC_MSG_RESULT(no)
- enable_vfs_iov_iter="no"
+ AC_MSG_CHECKING([whether fault_in_iov_iter_readable() is available])
+ ZFS_LINUX_TEST_RESULT([fault_in_iov_iter_readable], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_FAULT_IN_IOV_ITER_READABLE, 1,
+ [fault_in_iov_iter_readable() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ enable_vfs_iov_iter="no"
+ ])
])
AC_MSG_CHECKING([whether iov_iter_count() is available])
ZFS_LINUX_TEST_RESULT([iov_iter_count], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_COUNT, 1,
[iov_iter_count() is available])
],[
AC_MSG_RESULT(no)
enable_vfs_iov_iter="no"
])
AC_MSG_CHECKING([whether copy_to_iter() is available])
ZFS_LINUX_TEST_RESULT([copy_to_iter], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_COPY_TO_ITER, 1,
[copy_to_iter() is available])
],[
AC_MSG_RESULT(no)
enable_vfs_iov_iter="no"
])
AC_MSG_CHECKING([whether copy_from_iter() is available])
ZFS_LINUX_TEST_RESULT([copy_from_iter], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_COPY_FROM_ITER, 1,
[copy_from_iter() is available])
],[
AC_MSG_RESULT(no)
enable_vfs_iov_iter="no"
])
dnl #
dnl # This checks for iov_iter_type() in linux/uio.h. It is not
dnl # required, however, and the module will be compiled without it
dnl # using direct access to the member attribute.
dnl #
AC_MSG_CHECKING([whether iov_iter_type() is available])
ZFS_LINUX_TEST_RESULT([iov_iter_type], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOV_ITER_TYPE, 1,
[iov_iter_type() is available])
],[
AC_MSG_RESULT(no)
])
dnl #
dnl # As of the 4.9 kernel, support is provided for iovecs, kvecs,
dnl # bvecs and pipes in the iov_iter structure. As long as the
dnl # other support interfaces are all available the iov_iter can
dnl # be correctly used in the uio structure.
dnl #
AS_IF([test "x$enable_vfs_iov_iter" = "xyes"], [
AC_DEFINE(HAVE_VFS_IOV_ITER, 1,
[All required iov_iter interfaces are available])
])
])
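For reference, a sketch of a compat shim over the two fault-in checks above. One hedge worth noting: the older iov_iter_fault_in_readable() returns a negative errno, while the newer fault_in_iov_iter_readable() returns the number of bytes it could not fault in, so the shim only relies on zero meaning success. The wrapper name is illustrative.

/* Illustrative only: normalize both fault-in variants to 0 / -EFAULT. */
#include <linux/errno.h>
#include <linux/uio.h>

static int
example_fault_in_readable(struct iov_iter *iter, size_t bytes)
{
#if defined(HAVE_IOV_ITER_FAULT_IN_READABLE)
	/* Returns 0 on success or a negative errno. */
	return (iov_iter_fault_in_readable(iter, bytes));
#elif defined(HAVE_FAULT_IN_IOV_ITER_READABLE)
	/* Returns the number of bytes not faulted in; 0 means success. */
	return (fault_in_iov_iter_readable(iter, bytes) != 0 ? -EFAULT : 0);
#else
	/* HAVE_VFS_IOV_ITER is unset in this case; iov_iter paths unused. */
	return (0);
#endif
}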
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index bdd3caed2b3d..3122e9dbaa94 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,893 +1,948 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_TEST_MODULE
ZFS_AC_KERNEL_CONFIG_DEFINED
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once the compilation can be done in parallel significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_SRC_PAGEMAP_FOLIO_WAIT_BIT
+ ZFS_AC_KERNEL_SRC_ADD_DISK
+ ZFS_AC_KERNEL_SRC_KTHREAD
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SET_SPECIAL_STATE
ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG
ZFS_AC_KERNEL_PAGEMAP_FOLIO_WAIT_BIT
+ ZFS_AC_KERNEL_ADD_DISK
+ ZFS_AC_KERNEL_KTHREAD
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
+dnl # Most modern Linux distributions have separate locations for bare
+dnl # source (source) and prebuilt (build) files. Additionally, there are
+dnl # `source` and `build` symlinks in `/lib/modules/$(KERNEL_VERSION)`
+dnl # pointing to them. The directory search order is now:
+dnl #
+dnl # - `configure` command line values if both `--with-linux` and
+dnl # `--with-linux-obj` were defined
+dnl #
+dnl # - If only `--with-linux` was defined, `--with-linux-obj` is assumed
+dnl # to have the same value as `--with-linux`
+dnl #
+dnl # - If neither `--with-linux` nor `--with-linux-obj` were defined
+dnl # autodetection is used:
+dnl #
+dnl # - `/lib/modules/$(uname -r)/{source,build}` respectively, if they exist.
+dnl #
+dnl # - If only `/lib/modules/$(uname -r)/build` exists, it is assumed
+dnl # to be both source and build directory.
+dnl #
+dnl # - The first directory in `/lib/modules` with the highest version
+dnl # number according to `sort -V` that contains both `source` and
+dnl # `build` symlinks/directories. If a module directory contains only
+dnl # a `build` component, it is assumed to be both the source and the
+dnl # build directory.
+dnl #
+dnl # - Last resort: the first directory matching `/usr/src/kernels/*`
+dnl # and `/usr/src/linux-*` with the highest version number according
+dnl # to `sort -V` is assumed to be both source and build directory.
+dnl #
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
- AC_MSG_CHECKING([kernel source directory])
- AS_IF([test -z "$kernelsrc"], [
- AS_IF([test -e "/lib/modules/$(uname -r)/source"], [
- headersdir="/lib/modules/$(uname -r)/source"
- sourcelink=$(readlink -f "$headersdir")
+ AC_MSG_CHECKING([kernel source and build directories])
+ AS_IF([test -n "$kernelsrc" && test -z "$kernelbuild"], [
+ kernelbuild="$kernelsrc"
+ ], [test -z "$kernelsrc"], [
+ AS_IF([test -e "/lib/modules/$(uname -r)/source" && \
+ test -e "/lib/modules/$(uname -r)/build"], [
+ src="/lib/modules/$(uname -r)/source"
+ build="/lib/modules/$(uname -r)/build"
], [test -e "/lib/modules/$(uname -r)/build"], [
- headersdir="/lib/modules/$(uname -r)/build"
- sourcelink=$(readlink -f "$headersdir")
+ build="/lib/modules/$(uname -r)/build"
+ src="$build"
], [
- sourcelink=$(ls -1d /usr/src/kernels/* \
- /usr/src/linux-* \
- 2>/dev/null | grep -v obj | tail -1)
+ src=
+
+ for d in $(ls -1d /lib/modules/* 2>/dev/null | sort -Vr); do
+ if test -e "$d/source" && test -e "$d/build"; then
+ src="$d/source"
+ build="$d/build"
+ break
+ fi
+
+ if test -e "$d/build"; then
+ src="$d/build"
+ build="$d/build"
+ break
+ fi
+ done
+
+ # the least reliable method
+ if test -z "$src"; then
+ src=$(ls -1d /usr/src/kernels/* /usr/src/linux-* \
+ 2>/dev/null | grep -v obj | sort -Vr | head -1)
+ build="$src"
+ fi
])
- AS_IF([test -n "$sourcelink" && test -e ${sourcelink}], [
- kernelsrc=`readlink -f ${sourcelink}`
+ AS_IF([test -n "$src" && test -e "$src"], [
+ kernelsrc=$(readlink -e "$src")
], [
kernelsrc="[Not found]"
])
+ AS_IF([test -n "$build" && test -e "$build"], [
+ kernelbuild=$(readlink -e "$build")
+ ], [
+ kernelbuild="[Not found]"
+ ])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
+ AC_MSG_RESULT([done])
+ AC_MSG_CHECKING([kernel source directory])
AC_MSG_RESULT([$kernelsrc])
- AS_IF([test ! -d "$kernelsrc"], [
+ AC_MSG_CHECKING([kernel build directory])
+ AC_MSG_RESULT([$kernelbuild])
+ AS_IF([test ! -d "$kernelsrc" || test ! -d "$kernelbuild"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
- *** location of the kernel source with the '--with-linux=PATH' option.])
+ *** location of the kernel source and build with the '--with-linux=PATH' and
+ *** '--with-linux-obj=PATH' options respectively.])
])
- AC_MSG_CHECKING([kernel build directory])
- AS_IF([test -z "$kernelbuild"], [
- AS_IF([test x$withlinux != xyes -a -e "/lib/modules/$(uname -r)/build"], [
- kernelbuild=`readlink -f /lib/modules/$(uname -r)/build`
- ], [test -d ${kernelsrc}-obj/${target_cpu}/${target_cpu}], [
- kernelbuild=${kernelsrc}-obj/${target_cpu}/${target_cpu}
- ], [test -d ${kernelsrc}-obj/${target_cpu}/default], [
- kernelbuild=${kernelsrc}-obj/${target_cpu}/default
- ], [test -d `dirname ${kernelsrc}`/build-${target_cpu}], [
- kernelbuild=`dirname ${kernelsrc}`/build-${target_cpu}
- ], [
- kernelbuild=${kernelsrc}
- ])
- ])
- AC_MSG_RESULT([$kernelbuild])
-
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && fgrep -q UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && fgrep -q UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && fgrep -q UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
ZFS_AC_MODULE_SYMVERS
])
dnl #
dnl # Detect the QAT module to be built against, QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Then the data written to this ZFS pool is compressed by the QAT
dnl # accelerator automatically, and decompressed by QAT when read from the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and existed])
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # Basic toolchain sanity check.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_MODULE], [
AC_MSG_CHECKING([whether modules can be built])
ZFS_LINUX_TRY_COMPILE([], [], [
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Unable to build an empty module.
])
else
AC_MSG_ERROR([
*** Unable to build an empty module.
*** Please run 'make scripts' inside the kernel source tree.])
fi
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
+ AC_ARG_VAR([KERNEL_CC], [C compiler for
+ building kernel modules])
+ AC_ARG_VAR([KERNEL_LD], [Linker for
+ building kernel modules])
+ AC_ARG_VAR([KERNEL_LLVM], [Binary option to
+ build kernel modules with LLVM/CLANG toolchain])
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
- make modules -k -j$TEST_JOBS -C $LINUX_OBJ $ARCH_UM
+ make modules -k -j$TEST_JOBS ${KERNEL_CC:+CC=$KERNEL_CC} ${KERNEL_LD:+LD=$KERNEL_LD} ${KERNEL_LLVM:+LLVM=$KERNEL_LLVM} -C $LINUX_OBJ $ARCH_UM
M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin), the
dnl # linking step is faked to artificially create the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. Because all of the test cases are compiled in parallel, they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # Like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]], [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko],
[$3], [$4], [$5])
])
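dnl #
dnl # Illustrative call (names are examples only); the fifth argument is
dnl # written into the generated conftest.h:
dnl #
dnl #	ZFS_LINUX_TRY_COMPILE_HEADER([globals], [test body],
dnl #	    [action on success], [action on failure],
dnl #	    [#define EXAMPLE_OPTION 1])
dnl #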
diff --git a/sys/contrib/openzfs/config/toolchain-simd.m4 b/sys/contrib/openzfs/config/toolchain-simd.m4
index 1153cd6941a8..061576fd94e3 100644
--- a/sys/contrib/openzfs/config/toolchain-simd.m4
+++ b/sys/contrib/openzfs/config/toolchain-simd.m4
@@ -1,424 +1,490 @@
dnl #
dnl # Checks if host toolchain supports SIMD instructions
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD], [
case "$host_cpu" in
amd64 | x86_64 | x86 | i686)
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE2
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE3
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSSE3
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_1
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_2
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX2
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512F
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512CD
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512DQ
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512BW
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512IFMA
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VBMI
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512PF
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512ER
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VL
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AES
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_PCLMULQDQ
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVEOPT
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVES
;;
esac
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE], [
AC_MSG_CHECKING([whether host toolchain supports SSE])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
__asm__ __volatile__("xorps %xmm0, %xmm1");
}
]])], [
AC_DEFINE([HAVE_SSE], 1, [Define if host toolchain supports SSE])
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE2
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE2], [
AC_MSG_CHECKING([whether host toolchain supports SSE2])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
__asm__ __volatile__("pxor %xmm0, %xmm1");
}
]])], [
AC_DEFINE([HAVE_SSE2], 1, [Define if host toolchain supports SSE2])
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE3
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE3], [
AC_MSG_CHECKING([whether host toolchain supports SSE3])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
char v[16];
__asm__ __volatile__("lddqu %0,%%xmm0" :: "m"(v[0]));
}
]])], [
AC_DEFINE([HAVE_SSE3], 1, [Define if host toolchain supports SSE3])
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSSE3
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSSE3], [
AC_MSG_CHECKING([whether host toolchain supports SSSE3])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
__asm__ __volatile__("pshufb %xmm0,%xmm1");
}
]])], [
AC_DEFINE([HAVE_SSSE3], 1, [Define if host toolchain supports SSSE3])
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_1
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_1], [
AC_MSG_CHECKING([whether host toolchain supports SSE4.1])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
__asm__ __volatile__("pmaxsb %xmm0,%xmm1");
}
]])], [
AC_DEFINE([HAVE_SSE4_1], 1, [Define if host toolchain supports SSE4.1])
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_2
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_SSE4_2], [
AC_MSG_CHECKING([whether host toolchain supports SSE4.2])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
__asm__ __volatile__("pcmpgtq %xmm0, %xmm1");
}
]])], [
AC_DEFINE([HAVE_SSE4_2], 1, [Define if host toolchain supports SSE4.2])
AC_MSG_RESULT([yes])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX], [
AC_MSG_CHECKING([whether host toolchain supports AVX])
AC_LINK_IFELSE([AC_LANG_SOURCE([[
void main()
{
char v[32];
__asm__ __volatile__("vmovdqa %0,%%ymm0" :: "m"(v[0]));
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX], 1, [Define if host toolchain supports AVX])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX2
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX2], [
AC_MSG_CHECKING([whether host toolchain supports AVX2])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vpshufb %ymm0,%ymm1,%ymm2");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX2], 1, [Define if host toolchain supports AVX2])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512F
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512F], [
AC_MSG_CHECKING([whether host toolchain supports AVX512F])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vpandd %zmm0,%zmm1,%zmm2");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512F], 1, [Define if host toolchain supports AVX512F])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512CD
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512CD], [
AC_MSG_CHECKING([whether host toolchain supports AVX512CD])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vplzcntd %zmm0,%zmm1");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512CD], 1, [Define if host toolchain supports AVX512CD])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512DQ
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512DQ], [
AC_MSG_CHECKING([whether host toolchain supports AVX512DQ])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vandpd %zmm0,%zmm1,%zmm2");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512DQ], 1, [Define if host toolchain supports AVX512DQ])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512BW
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512BW], [
AC_MSG_CHECKING([whether host toolchain supports AVX512BW])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vpshufb %zmm0,%zmm1,%zmm2");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512BW], 1, [Define if host toolchain supports AVX512BW])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512IFMA
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512IFMA], [
AC_MSG_CHECKING([whether host toolchain supports AVX512IFMA])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vpmadd52luq %zmm0,%zmm1,%zmm2");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512IFMA], 1, [Define if host toolchain supports AVX512IFMA])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VBMI
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VBMI], [
AC_MSG_CHECKING([whether host toolchain supports AVX512VBMI])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vpermb %zmm0,%zmm1,%zmm2");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512VBMI], 1, [Define if host toolchain supports AVX512VBMI])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512PF
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512PF], [
AC_MSG_CHECKING([whether host toolchain supports AVX512PF])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vgatherpf0dps (%rsi,%zmm0,4){%k1}");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512PF], 1, [Define if host toolchain supports AVX512PF])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512ER
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512ER], [
AC_MSG_CHECKING([whether host toolchain supports AVX512ER])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vexp2pd %zmm0,%zmm1");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512ER], 1, [Define if host toolchain supports AVX512ER])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VL
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AVX512VL], [
AC_MSG_CHECKING([whether host toolchain supports AVX512VL])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("vpabsq %zmm0,%zmm1");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AVX512VL], 1, [Define if host toolchain supports AVX512VL])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AES
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AES], [
AC_MSG_CHECKING([whether host toolchain supports AES])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("aesenc %xmm0, %xmm1");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_AES], 1, [Define if host toolchain supports AES])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_PCLMULQDQ
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_PCLMULQDQ], [
AC_MSG_CHECKING([whether host toolchain supports PCLMULQDQ])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("pclmulqdq %0, %%xmm0, %%xmm1" :: "i"(0));
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_PCLMULQDQ], 1, [Define if host toolchain supports PCLMULQDQ])
], [
AC_MSG_RESULT([no])
])
])
dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE], [
AC_MSG_CHECKING([whether host toolchain supports MOVBE])
AC_LINK_IFELSE([AC_LANG_SOURCE([
[
void main()
{
__asm__ __volatile__("movbe 0(%eax), %eax");
}
]])], [
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_MOVBE], 1, [Define if host toolchain supports MOVBE])
], [
AC_MSG_RESULT([no])
])
])
+
+dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE], [
+ AC_MSG_CHECKING([whether host toolchain supports XSAVE])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ void main()
+ {
+ char b[4096] __attribute__ ((aligned (64)));
+ __asm__ __volatile__("xsave %[b]\n" : : [b] "m" (*b) : "memory");
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_XSAVE], 1, [Define if host toolchain supports XSAVE])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVEOPT
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVEOPT], [
+ AC_MSG_CHECKING([whether host toolchain supports XSAVEOPT])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ void main()
+ {
+ char b[4096] __attribute__ ((aligned (64)));
+ __asm__ __volatile__("xsaveopt %[b]\n" : : [b] "m" (*b) : "memory");
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_XSAVEOPT], 1, [Define if host toolchain supports XSAVEOPT])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVES
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVES], [
+ AC_MSG_CHECKING([whether host toolchain supports XSAVES])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ void main()
+ {
+ char b[4096] __attribute__ ((aligned (64)));
+ __asm__ __volatile__("xsaves %[b]\n" : : [b] "m" (*b) : "memory");
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_XSAVES], 1, [Define if host toolchain supports XSAVES])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
diff --git a/sys/contrib/openzfs/config/user-libfetch.m4 b/sys/contrib/openzfs/config/user-libfetch.m4
new file mode 100644
index 000000000000..d961c6ca77a1
--- /dev/null
+++ b/sys/contrib/openzfs/config/user-libfetch.m4
@@ -0,0 +1,71 @@
+dnl #
+dnl # Check for a libfetch - either fetch(3) or libcurl.
+dnl #
+dnl # There are two configuration dimensions:
+dnl # * fetch(3) vs libcurl
+dnl # * static vs dynamic
+dnl #
+dnl # fetch(3) is only dynamic.
+dnl # We use sover 6, which first appeared in FreeBSD 8.0-RELEASE.
+dnl #
+dnl # libcurl development packages include curl-config(1) – we want:
+dnl # * HTTPS support
+dnl # * version at least 7.16 (October 2006), for sover 4
+dnl # * to decide if it's static or not
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_USER_LIBFETCH], [
+ AC_MSG_CHECKING([for libfetch])
+ LIBFETCH_LIBS=
+ LIBFETCH_IS_FETCH=0
+ LIBFETCH_IS_LIBCURL=0
+ LIBFETCH_DYNAMIC=0
+ LIBFETCH_SONAME=
+ have_libfetch=
+
+ saved_libs="$LIBS"
+ LIBS="$LIBS -lfetch"
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+ #include <sys/param.h>
+ #include <stdio.h>
+ #include <fetch.h>
+ ]], [fetchGetURL("", "");])], [
+ have_libfetch=1
+ LIBFETCH_IS_FETCH=1
+ LIBFETCH_DYNAMIC=1
+ LIBFETCH_SONAME="libfetch.so.6"
+ LIBFETCH_LIBS="-ldl"
+ AC_MSG_RESULT([fetch(3)])
+ ], [])
+ LIBS="$saved_libs"
+
+ if test -z "$have_libfetch"; then
+ if curl-config --protocols 2>/dev/null | grep -q HTTPS &&
+ test "$(printf "%u" "0x$(curl-config --vernum)")" -ge "$(printf "%u" "0x071000")"; then
+ have_libfetch=1
+ LIBFETCH_IS_LIBCURL=1
+ if test "$(curl-config --built-shared)" = "yes"; then
+ LIBFETCH_DYNAMIC=1
+ LIBFETCH_SONAME="libcurl.so.4"
+ LIBFETCH_LIBS="-ldl"
+ AC_MSG_RESULT([libcurl])
+ else
+ LIBFETCH_LIBS="$(curl-config --libs)"
+ AC_MSG_RESULT([libcurl (static)])
+ fi
+
+ CCFLAGS="$CCFLAGS $(curl-config --cflags)"
+ fi
+ fi
+
+ if test -z "$have_libfetch"; then
+ AC_MSG_RESULT([none])
+ fi
+
+ AC_SUBST([LIBFETCH_LIBS])
+ AC_SUBST([LIBFETCH_DYNAMIC])
+ AC_SUBST([LIBFETCH_SONAME])
+ AC_DEFINE_UNQUOTED([LIBFETCH_IS_FETCH], [$LIBFETCH_IS_FETCH], [libfetch is fetch(3)])
+ AC_DEFINE_UNQUOTED([LIBFETCH_IS_LIBCURL], [$LIBFETCH_IS_LIBCURL], [libfetch is libcurl])
+ AC_DEFINE_UNQUOTED([LIBFETCH_DYNAMIC], [$LIBFETCH_DYNAMIC], [whether the chosen libfetch is to be loaded at run-time])
+ AC_DEFINE_UNQUOTED([LIBFETCH_SONAME], ["$LIBFETCH_SONAME"], [soname of chosen libfetch])
+])
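dnl #
dnl # For reference (illustrative note, not part of the macro): curl-config
dnl # --vernum prints the version as a hexadecimal triple, so the 7.16
dnl # minimum above is 071000 and, e.g., libcurl 7.68.0 reports 074400,
dnl # making the version check above equivalent to:
dnl #
dnl #	test "$(printf "%u" 0x074400)" -ge "$(printf "%u" 0x071000)"
dnl #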
diff --git a/sys/contrib/openzfs/config/user.m4 b/sys/contrib/openzfs/config/user.m4
index e799faffb61c..670820b37715 100644
--- a/sys/contrib/openzfs/config/user.m4
+++ b/sys/contrib/openzfs/config/user.m4
@@ -1,46 +1,47 @@
dnl #
dnl # Default ZFS user configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER], [
ZFS_AC_CONFIG_USER_GETTEXT
ZFS_AC_CONFIG_USER_MOUNT_HELPER
ZFS_AC_CONFIG_USER_SYSVINIT
ZFS_AC_CONFIG_USER_DRACUT
AM_COND_IF([BUILD_FREEBSD], [
PKG_INSTALLDIR(['${prefix}/libdata/pkgconfig'])], [
PKG_INSTALLDIR
])
ZFS_AC_CONFIG_USER_ZLIB
AM_COND_IF([BUILD_LINUX], [
ZFS_AC_CONFIG_USER_UDEV
ZFS_AC_CONFIG_USER_SYSTEMD
ZFS_AC_CONFIG_USER_LIBUUID
ZFS_AC_CONFIG_USER_LIBBLKID
])
ZFS_AC_CONFIG_USER_LIBTIRPC
ZFS_AC_CONFIG_USER_LIBUDEV
ZFS_AC_CONFIG_USER_LIBCRYPTO
ZFS_AC_CONFIG_USER_LIBAIO
ZFS_AC_CONFIG_USER_LIBATOMIC
+ ZFS_AC_CONFIG_USER_LIBFETCH
ZFS_AC_CONFIG_USER_CLOCK_GETTIME
ZFS_AC_CONFIG_USER_PAM
ZFS_AC_CONFIG_USER_RUNSTATEDIR
ZFS_AC_CONFIG_USER_MAKEDEV_IN_SYSMACROS
ZFS_AC_CONFIG_USER_MAKEDEV_IN_MKDEV
ZFS_AC_CONFIG_USER_ZFSEXEC
ZFS_AC_TEST_FRAMEWORK
AC_CHECK_FUNCS([issetugid mlockall strlcat strlcpy])
])
dnl #
dnl # Set up the environment for the ZFS Test Suite. Currently only
dnl # Linux-style systems are supported, but this infrastructure can
dnl # be extended to support other platforms if needed.
dnl #
AC_DEFUN([ZFS_AC_TEST_FRAMEWORK], [
ZONENAME="echo global"
AC_SUBST(ZONENAME)
AC_SUBST(RM)
])
diff --git a/sys/contrib/openzfs/config/zfs-build.m4 b/sys/contrib/openzfs/config/zfs-build.m4
index 27041c054c26..126f78476bd4 100644
--- a/sys/contrib/openzfs/config/zfs-build.m4
+++ b/sys/contrib/openzfs/config/zfs-build.m4
@@ -1,626 +1,629 @@
AC_DEFUN([ZFS_AC_LICENSE], [
AC_MSG_CHECKING([zfs author])
AC_MSG_RESULT([$ZFS_META_AUTHOR])
AC_MSG_CHECKING([zfs license])
AC_MSG_RESULT([$ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_DEBUG_ENABLE], [
DEBUG_CFLAGS="-Werror"
DEBUG_CPPFLAGS="-DDEBUG -UNDEBUG"
DEBUG_LDFLAGS=""
DEBUG_ZFS="_with_debug"
WITH_DEBUG="true"
AC_DEFINE(ZFS_DEBUG, 1, [zfs debugging enabled])
KERNEL_DEBUG_CFLAGS="-Werror"
KERNEL_DEBUG_CPPFLAGS="-DDEBUG -UNDEBUG"
])
AC_DEFUN([ZFS_AC_DEBUG_DISABLE], [
DEBUG_CFLAGS=""
DEBUG_CPPFLAGS="-UDEBUG -DNDEBUG"
DEBUG_LDFLAGS=""
DEBUG_ZFS="_without_debug"
WITH_DEBUG=""
KERNEL_DEBUG_CFLAGS=""
KERNEL_DEBUG_CPPFLAGS="-UDEBUG -DNDEBUG"
])
dnl #
dnl # When debugging is enabled:
dnl # - Enable all ASSERTs (-DDEBUG)
dnl # - Promote all compiler warnings to errors (-Werror)
dnl #
dnl # (If INVARIANTS is detected, we need to force DEBUG, or strange panics
dnl # can ensue.)
dnl #
AC_DEFUN([ZFS_AC_DEBUG], [
AC_MSG_CHECKING([whether assertion support will be enabled])
AC_ARG_ENABLE([debug],
[AS_HELP_STRING([--enable-debug],
[Enable compiler and code assertions @<:@default=no@:>@])],
[],
[enable_debug=no])
AS_CASE(["x$enable_debug"],
["xyes"],
[ZFS_AC_DEBUG_ENABLE],
["xno"],
[ZFS_AC_DEBUG_DISABLE],
[AC_MSG_ERROR([Unknown option $enable_debug])])
AS_CASE(["x$enable_invariants"],
["xyes"],
[],
["xno"],
[],
[ZFS_AC_DEBUG_INVARIANTS_DETECT])
AS_CASE(["x$enable_invariants"],
["xyes"],
[ZFS_AC_DEBUG_ENABLE],
["xno"],
[],
[AC_MSG_ERROR([Unknown option $enable_invariants])])
AC_SUBST(DEBUG_CFLAGS)
AC_SUBST(DEBUG_CPPFLAGS)
AC_SUBST(DEBUG_LDFLAGS)
AC_SUBST(DEBUG_ZFS)
AC_SUBST(WITH_DEBUG)
AC_SUBST(KERNEL_DEBUG_CFLAGS)
AC_SUBST(KERNEL_DEBUG_CPPFLAGS)
AC_MSG_RESULT([$enable_debug])
])
AC_DEFUN([ZFS_AC_DEBUGINFO_ENABLE], [
DEBUG_CFLAGS="$DEBUG_CFLAGS -g -fno-inline $NO_IPA_SRA"
KERNEL_DEBUG_CFLAGS="$KERNEL_DEBUG_CFLAGS -fno-inline $NO_IPA_SRA"
KERNEL_MAKE="$KERNEL_MAKE CONFIG_DEBUG_INFO=y"
DEBUGINFO_ZFS="_with_debuginfo"
])
AC_DEFUN([ZFS_AC_DEBUGINFO_DISABLE], [
DEBUGINFO_ZFS="_without_debuginfo"
])
AC_DEFUN([ZFS_AC_DEBUGINFO], [
AC_MSG_CHECKING([whether debuginfo support will be forced])
AC_ARG_ENABLE([debuginfo],
[AS_HELP_STRING([--enable-debuginfo],
[Force generation of debuginfo @<:@default=no@:>@])],
[],
[enable_debuginfo=no])
AS_CASE(["x$enable_debuginfo"],
["xyes"],
[ZFS_AC_DEBUGINFO_ENABLE],
["xno"],
[ZFS_AC_DEBUGINFO_DISABLE],
[AC_MSG_ERROR([Unknown option $enable_debuginfo])])
AC_SUBST(DEBUG_CFLAGS)
AC_SUBST(DEBUGINFO_ZFS)
AC_SUBST(KERNEL_DEBUG_CFLAGS)
AC_SUBST(KERNEL_MAKE)
AC_MSG_RESULT([$enable_debuginfo])
])
dnl #
dnl # Disabled by default, provides basic memory tracking. Tracks the total
dnl # number of bytes allocated with kmem_alloc() and freed with kmem_free().
dnl # Then, at module unload time, any leaked bytes are reported on the
dnl # console.
dnl #
AC_DEFUN([ZFS_AC_DEBUG_KMEM], [
AC_MSG_CHECKING([whether basic kmem accounting is enabled])
AC_ARG_ENABLE([debug-kmem],
[AS_HELP_STRING([--enable-debug-kmem],
[Enable basic kmem accounting @<:@default=no@:>@])],
[],
[enable_debug_kmem=no])
AS_IF([test "x$enable_debug_kmem" = xyes], [
KERNEL_DEBUG_CPPFLAGS="${KERNEL_DEBUG_CPPFLAGS} -DDEBUG_KMEM"
DEBUG_KMEM_ZFS="_with_debug_kmem"
], [
DEBUG_KMEM_ZFS="_without_debug_kmem"
])
AC_SUBST(KERNEL_DEBUG_CPPFLAGS)
AC_SUBST(DEBUG_KMEM_ZFS)
AC_MSG_RESULT([$enable_debug_kmem])
])
dnl #
dnl # Disabled by default, provides detailed memory tracking. This feature
dnl # also requires --enable-debug-kmem to be set. When enabled not only will
dnl # total bytes be tracked but also the location of every kmem_alloc() and
dnl # kmem_free(). When the module is unloaded a list of all leaked addresses
dnl # and where they were allocated will be dumped to the console. Enabling
dnl # this feature has a significant impact on performance, but it makes
dnl # finding memory leaks straightforward.
dnl #
AC_DEFUN([ZFS_AC_DEBUG_KMEM_TRACKING], [
AC_MSG_CHECKING([whether detailed kmem tracking is enabled])
AC_ARG_ENABLE([debug-kmem-tracking],
[AS_HELP_STRING([--enable-debug-kmem-tracking],
[Enable detailed kmem tracking @<:@default=no@:>@])],
[],
[enable_debug_kmem_tracking=no])
AS_IF([test "x$enable_debug_kmem_tracking" = xyes], [
KERNEL_DEBUG_CPPFLAGS="${KERNEL_DEBUG_CPPFLAGS} -DDEBUG_KMEM_TRACKING"
DEBUG_KMEM_TRACKING_ZFS="_with_debug_kmem_tracking"
], [
DEBUG_KMEM_TRACKING_ZFS="_without_debug_kmem_tracking"
])
AC_SUBST(KERNEL_DEBUG_CPPFLAGS)
AC_SUBST(DEBUG_KMEM_TRACKING_ZFS)
AC_MSG_RESULT([$enable_debug_kmem_tracking])
])
AC_DEFUN([ZFS_AC_DEBUG_INVARIANTS_DETECT_FREEBSD], [
AS_IF([sysctl -n kern.conftxt | fgrep -qx $'options\tINVARIANTS'],
[enable_invariants="yes"],
[enable_invariants="no"])
])
AC_DEFUN([ZFS_AC_DEBUG_INVARIANTS_DETECT], [
AM_COND_IF([BUILD_FREEBSD],
[ZFS_AC_DEBUG_INVARIANTS_DETECT_FREEBSD],
[enable_invariants="no"])
])
dnl #
dnl # Detected for the running kernel by default, enables INVARIANTS features
dnl # in the FreeBSD kernel module. This feature must be used when building
dnl # for a FreeBSD kernel with "options INVARIANTS" in the KERNCONF and must
dnl # not be used when the INVARIANTS option is absent.
dnl #
AC_DEFUN([ZFS_AC_DEBUG_INVARIANTS], [
AC_MSG_CHECKING([whether FreeBSD kernel INVARIANTS checks are enabled])
AC_ARG_ENABLE([invariants],
[AS_HELP_STRING([--enable-invariants],
[Enable FreeBSD kernel INVARIANTS checks [[default: detect]]])],
[], [ZFS_AC_DEBUG_INVARIANTS_DETECT])
AS_IF([test "x$enable_invariants" = xyes],
[WITH_INVARIANTS="true"],
[WITH_INVARIANTS=""])
AC_SUBST(WITH_INVARIANTS)
AC_MSG_RESULT([$enable_invariants])
])
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
AX_COUNT_CPUS([])
AC_SUBST(CPU_COUNT)
ZFS_AC_CONFIG_ALWAYS_CC_NO_UNUSED_BUT_SET_VARIABLE
ZFS_AC_CONFIG_ALWAYS_CC_NO_BOOL_COMPARE
ZFS_AC_CONFIG_ALWAYS_CC_IMPLICIT_FALLTHROUGH
ZFS_AC_CONFIG_ALWAYS_CC_FRAME_LARGER_THAN
ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_TRUNCATION
ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_ZERO_LENGTH
ZFS_AC_CONFIG_ALWAYS_CC_NO_OMIT_FRAME_POINTER
ZFS_AC_CONFIG_ALWAYS_CC_NO_IPA_SRA
ZFS_AC_CONFIG_ALWAYS_CC_ASAN
ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD
ZFS_AC_CONFIG_ALWAYS_SYSTEM
ZFS_AC_CONFIG_ALWAYS_ARCH
ZFS_AC_CONFIG_ALWAYS_PYTHON
ZFS_AC_CONFIG_ALWAYS_PYZFS
ZFS_AC_CONFIG_ALWAYS_SED
ZFS_AC_CONFIG_ALWAYS_CPPCHECK
ZFS_AC_CONFIG_ALWAYS_SHELLCHECK
])
AC_DEFUN([ZFS_AC_CONFIG], [
dnl # Remove the previous build test directory.
rm -Rf build
ZFS_CONFIG=all
AC_ARG_WITH([config],
AS_HELP_STRING([--with-config=CONFIG],
[Config file 'kernel|user|all|srpm']),
[ZFS_CONFIG="$withval"])
AC_ARG_ENABLE([linux-builtin],
[AS_HELP_STRING([--enable-linux-builtin],
[Configure for builtin in-tree kernel modules @<:@default=no@:>@])],
[],
[enable_linux_builtin=no])
AC_MSG_CHECKING([zfs config])
AC_MSG_RESULT([$ZFS_CONFIG]);
AC_SUBST(ZFS_CONFIG)
ZFS_AC_CONFIG_ALWAYS
AM_COND_IF([BUILD_LINUX], [
AC_ARG_VAR([TEST_JOBS], [simultaneous jobs during configure])
if test "x$ac_cv_env_TEST_JOBS_set" != "xset"; then
TEST_JOBS=$CPU_COUNT
fi
AC_SUBST(TEST_JOBS)
])
case "$ZFS_CONFIG" in
kernel) ZFS_AC_CONFIG_KERNEL ;;
user) ZFS_AC_CONFIG_USER ;;
all) ZFS_AC_CONFIG_USER
ZFS_AC_CONFIG_KERNEL ;;
srpm) ;;
*)
AC_MSG_RESULT([Error!])
AC_MSG_ERROR([Bad value "$ZFS_CONFIG" for --with-config,
user kernel|user|all|srpm]) ;;
esac
AM_CONDITIONAL([CONFIG_USER],
[test "$ZFS_CONFIG" = user -o "$ZFS_CONFIG" = all])
AM_CONDITIONAL([CONFIG_KERNEL],
[test "$ZFS_CONFIG" = kernel -o "$ZFS_CONFIG" = all] &&
[test "x$enable_linux_builtin" != xyes ])
AM_CONDITIONAL([CONFIG_QAT],
[test "$ZFS_CONFIG" = kernel -o "$ZFS_CONFIG" = all] &&
[test "x$qatsrc" != x ])
AM_CONDITIONAL([WANT_DEVNAME2DEVID], [test "x$user_libudev" = xyes ])
AM_CONDITIONAL([WANT_MMAP_LIBAIO], [test "x$user_libaio" = xyes ])
AM_CONDITIONAL([PAM_ZFS_ENABLED], [test "x$enable_pam" = xyes])
])
dnl #
dnl # Check for rpm+rpmbuild to build RPM packages. If these tools
dnl # are missing it is non-fatal, but you will not be able to build
dnl # RPM packages and will be warned if you try to.
dnl #
dnl # By default the generic spec file will be used because it requires
dnl # minimal dependencies. Distribution-specific spec files can be
dnl # placed under the 'rpm/<distribution>' directory and enabled using
dnl # the --with-spec=<distribution> configure option.
dnl #
AC_DEFUN([ZFS_AC_RPM], [
RPM=rpm
RPMBUILD=rpmbuild
AC_MSG_CHECKING([whether $RPM is available])
AS_IF([tmp=$($RPM --version 2>/dev/null)], [
RPM_VERSION=$(echo $tmp | $AWK '/RPM/ { print $[3] }')
HAVE_RPM=yes
AC_MSG_RESULT([$HAVE_RPM ($RPM_VERSION)])
],[
HAVE_RPM=no
AC_MSG_RESULT([$HAVE_RPM])
])
AC_MSG_CHECKING([whether $RPMBUILD is available])
AS_IF([tmp=$($RPMBUILD --version 2>/dev/null)], [
RPMBUILD_VERSION=$(echo $tmp | $AWK '/RPM/ { print $[3] }')
HAVE_RPMBUILD=yes
AC_MSG_RESULT([$HAVE_RPMBUILD ($RPMBUILD_VERSION)])
],[
HAVE_RPMBUILD=no
AC_MSG_RESULT([$HAVE_RPMBUILD])
])
RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(DEBUGINFO_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(DEBUG_KMEM_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(DEBUG_KMEM_TRACKING_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(ASAN_ZFS) 1"'
RPM_DEFINE_UTIL=' --define "_initconfdir $(initconfdir)"'
dnl # Make the next three RPM_DEFINE_UTIL additions conditional, since
dnl # their values may not be set when running:
dnl #
dnl # ./configure --with-config=srpm
dnl #
AS_IF([test -n "$dracutdir" ], [
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_dracutdir $(dracutdir)"'
])
AS_IF([test -n "$udevdir" ], [
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_udevdir $(udevdir)"'
])
AS_IF([test -n "$udevruledir" ], [
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_udevruledir $(udevruledir)"'
])
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_SYSTEMD)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PYZFS)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PAM)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PYTHON_VERSION)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PYTHON_PKG_VERSION)'
dnl # Override default lib directory on Debian/Ubuntu systems. The
dnl # provided /usr/lib/rpm/platform/<arch>/macros files do not
dnl # specify the correct path for multiarch systems as described
dnl # by the packaging guidelines.
dnl #
dnl # https://wiki.ubuntu.com/MultiarchSpec
dnl # https://wiki.debian.org/Multiarch/Implementation
dnl #
AS_IF([test "$DEFAULT_PACKAGE" = "deb"], [
MULTIARCH_LIBDIR="lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)"
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_lib $(MULTIARCH_LIBDIR)"'
AC_SUBST(MULTIARCH_LIBDIR)
])
dnl # Make RPM_DEFINE_KMOD additions conditional on CONFIG_KERNEL,
dnl # since the values will not be set otherwise. The spec files
dnl # provide defaults for them.
dnl #
RPM_DEFINE_KMOD='--define "_wrong_version_format_terminate_build 0"'
AM_COND_IF([CONFIG_KERNEL], [
RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernels $(LINUX_VERSION)"'
RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "ksrc $(LINUX)"'
RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kobj $(LINUX_OBJ)"'
+ RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_cc KERNEL_CC=$(KERNEL_CC)"'
+ RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_ld KERNEL_LD=$(KERNEL_LD)"'
+ RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernel_llvm KERNEL_LLVM=$(KERNEL_LLVM)"'
])
RPM_DEFINE_DKMS=''
SRPM_DEFINE_COMMON='--define "build_src_rpm 1"'
SRPM_DEFINE_UTIL=
SRPM_DEFINE_KMOD=
SRPM_DEFINE_DKMS=
RPM_SPEC_DIR="rpm/generic"
AC_ARG_WITH([spec],
AS_HELP_STRING([--with-spec=SPEC],
[Spec files 'generic|redhat']),
[RPM_SPEC_DIR="rpm/$withval"])
AC_MSG_CHECKING([whether spec files are available])
AC_MSG_RESULT([yes ($RPM_SPEC_DIR/*.spec.in)])
AC_SUBST(HAVE_RPM)
AC_SUBST(RPM)
AC_SUBST(RPM_VERSION)
AC_SUBST(HAVE_RPMBUILD)
AC_SUBST(RPMBUILD)
AC_SUBST(RPMBUILD_VERSION)
AC_SUBST(RPM_SPEC_DIR)
AC_SUBST(RPM_DEFINE_UTIL)
AC_SUBST(RPM_DEFINE_KMOD)
AC_SUBST(RPM_DEFINE_DKMS)
AC_SUBST(RPM_DEFINE_COMMON)
AC_SUBST(SRPM_DEFINE_UTIL)
AC_SUBST(SRPM_DEFINE_KMOD)
AC_SUBST(SRPM_DEFINE_DKMS)
AC_SUBST(SRPM_DEFINE_COMMON)
])
dnl #
dnl # Check for dpkg+dpkg-buildpackage to build DEB packages. If these
dnl # tools are missing it is non-fatal, but you will not be able to build
dnl # DEB packages and will be warned if you try to.
dnl #
AC_DEFUN([ZFS_AC_DPKG], [
DPKG=dpkg
DPKGBUILD=dpkg-buildpackage
AC_MSG_CHECKING([whether $DPKG is available])
AS_IF([tmp=$($DPKG --version 2>/dev/null)], [
DPKG_VERSION=$(echo $tmp | $AWK '/Debian/ { print $[7] }')
HAVE_DPKG=yes
AC_MSG_RESULT([$HAVE_DPKG ($DPKG_VERSION)])
],[
HAVE_DPKG=no
AC_MSG_RESULT([$HAVE_DPKG])
])
AC_MSG_CHECKING([whether $DPKGBUILD is available])
AS_IF([tmp=$($DPKGBUILD --version 2>/dev/null)], [
DPKGBUILD_VERSION=$(echo $tmp | \
$AWK '/Debian/ { print $[4] }' | cut -f-4 -d'.')
HAVE_DPKGBUILD=yes
AC_MSG_RESULT([$HAVE_DPKGBUILD ($DPKGBUILD_VERSION)])
],[
HAVE_DPKGBUILD=no
AC_MSG_RESULT([$HAVE_DPKGBUILD])
])
AC_SUBST(HAVE_DPKG)
AC_SUBST(DPKG)
AC_SUBST(DPKG_VERSION)
AC_SUBST(HAVE_DPKGBUILD)
AC_SUBST(DPKGBUILD)
AC_SUBST(DPKGBUILD_VERSION)
])
dnl #
dnl # Until native packaging for the various packaging systems can be
dnl # added, the least we can do is attempt to use alien to convert the
dnl # RPM packages to the needed package type. This is a hack, but so far
dnl # it has worked reasonably well.
dnl #
AC_DEFUN([ZFS_AC_ALIEN], [
ALIEN=alien
AC_MSG_CHECKING([whether $ALIEN is available])
AS_IF([tmp=$($ALIEN --version 2>/dev/null)], [
ALIEN_VERSION=$(echo $tmp | $AWK '{ print $[3] }')
ALIEN_MAJOR=$(echo ${ALIEN_VERSION} | $AWK -F'.' '{ print $[1] }')
ALIEN_MINOR=$(echo ${ALIEN_VERSION} | $AWK -F'.' '{ print $[2] }')
ALIEN_POINT=$(echo ${ALIEN_VERSION} | $AWK -F'.' '{ print $[3] }')
HAVE_ALIEN=yes
AC_MSG_RESULT([$HAVE_ALIEN ($ALIEN_VERSION)])
],[
HAVE_ALIEN=no
AC_MSG_RESULT([$HAVE_ALIEN])
])
AC_SUBST(HAVE_ALIEN)
AC_SUBST(ALIEN)
AC_SUBST(ALIEN_VERSION)
AC_SUBST(ALIEN_MAJOR)
AC_SUBST(ALIEN_MINOR)
AC_SUBST(ALIEN_POINT)
])
dnl #
dnl # Using the VENDOR tag from config.guess, set the default
dnl # package type for 'make pkg': (rpm | deb | tgz)
dnl #
AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([os distribution])
AC_ARG_WITH([vendor],
[AS_HELP_STRING([--with-vendor],
[Distribution vendor @<:@default=check@:>@])],
[with_vendor=$withval],
[with_vendor=check])
AS_IF([test "x$with_vendor" = "xcheck"],[
if test -f /etc/toss-release ; then
VENDOR=toss ;
elif test -f /etc/fedora-release ; then
VENDOR=fedora ;
elif test -f /etc/redhat-release ; then
VENDOR=redhat ;
elif test -f /etc/gentoo-release ; then
VENDOR=gentoo ;
elif test -f /etc/arch-release ; then
VENDOR=arch ;
elif test -f /etc/SuSE-release ; then
VENDOR=sles ;
elif test -f /etc/slackware-version ; then
VENDOR=slackware ;
elif test -f /etc/lunar.release ; then
VENDOR=lunar ;
elif test -f /etc/lsb-release ; then
VENDOR=ubuntu ;
elif test -f /etc/debian_version ; then
VENDOR=debian ;
elif test -f /etc/alpine-release ; then
VENDOR=alpine ;
elif test -f /bin/freebsd-version ; then
VENDOR=freebsd ;
else
VENDOR= ;
fi],
[ test "x${with_vendor}" != x],[
VENDOR="$with_vendor" ],
[ VENDOR= ; ]
)
AC_MSG_RESULT([$VENDOR])
AC_SUBST(VENDOR)
AC_MSG_CHECKING([default package type])
case "$VENDOR" in
toss) DEFAULT_PACKAGE=rpm ;;
redhat) DEFAULT_PACKAGE=rpm ;;
fedora) DEFAULT_PACKAGE=rpm ;;
gentoo) DEFAULT_PACKAGE=tgz ;;
alpine) DEFAULT_PACKAGE=tgz ;;
arch) DEFAULT_PACKAGE=tgz ;;
sles) DEFAULT_PACKAGE=rpm ;;
slackware) DEFAULT_PACKAGE=tgz ;;
lunar) DEFAULT_PACKAGE=tgz ;;
ubuntu) DEFAULT_PACKAGE=deb ;;
debian) DEFAULT_PACKAGE=deb ;;
freebsd) DEFAULT_PACKAGE=pkg ;;
*) DEFAULT_PACKAGE=rpm ;;
esac
AC_MSG_RESULT([$DEFAULT_PACKAGE])
AC_SUBST(DEFAULT_PACKAGE)
AC_MSG_CHECKING([default init directory])
case "$VENDOR" in
freebsd) initdir=$sysconfdir/rc.d ;;
*) initdir=$sysconfdir/init.d;;
esac
AC_MSG_RESULT([$initdir])
AC_SUBST(initdir)
AC_MSG_CHECKING([default init script type and shell])
case "$VENDOR" in
toss) DEFAULT_INIT_SCRIPT=redhat ;;
redhat) DEFAULT_INIT_SCRIPT=redhat ;;
fedora) DEFAULT_INIT_SCRIPT=fedora ;;
gentoo) DEFAULT_INIT_SCRIPT=openrc ;;
alpine) DEFAULT_INIT_SCRIPT=openrc ;;
arch) DEFAULT_INIT_SCRIPT=lsb ;;
sles) DEFAULT_INIT_SCRIPT=lsb ;;
slackware) DEFAULT_INIT_SCRIPT=lsb ;;
lunar) DEFAULT_INIT_SCRIPT=lunar ;;
ubuntu) DEFAULT_INIT_SCRIPT=lsb ;;
debian) DEFAULT_INIT_SCRIPT=lsb ;;
freebsd) DEFAULT_INIT_SCRIPT=freebsd;;
*) DEFAULT_INIT_SCRIPT=lsb ;;
esac
# On gentoo, it's possible that OpenRC isn't installed. Check if
# /sbin/openrc-run exists, and if not, fall back to generic defaults.
DEFAULT_INIT_SHELL="/bin/sh"
AS_IF([test "$DEFAULT_INIT_SCRIPT" = "openrc"], [
AS_IF([test -x "/sbin/openrc-run"],
[DEFAULT_INIT_SHELL="/sbin/openrc-run"],
[DEFAULT_INIT_SCRIPT=lsb])
])
AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT:$DEFAULT_INIT_SHELL])
AC_SUBST(DEFAULT_INIT_SCRIPT)
AC_SUBST(DEFAULT_INIT_SHELL)
AC_MSG_CHECKING([default nfs server init script])
AS_IF([test "$VENDOR" = "debian"],
[DEFAULT_INIT_NFS_SERVER="nfs-kernel-server"],
[DEFAULT_INIT_NFS_SERVER="nfs"]
)
AC_MSG_RESULT([$DEFAULT_INIT_NFS_SERVER])
AC_SUBST(DEFAULT_INIT_NFS_SERVER)
AC_MSG_CHECKING([default init config directory])
case "$VENDOR" in
alpine) initconfdir=/etc/conf.d ;;
gentoo) initconfdir=/etc/conf.d ;;
toss) initconfdir=/etc/sysconfig ;;
redhat) initconfdir=/etc/sysconfig ;;
fedora) initconfdir=/etc/sysconfig ;;
sles) initconfdir=/etc/sysconfig ;;
ubuntu) initconfdir=/etc/default ;;
debian) initconfdir=/etc/default ;;
freebsd) initconfdir=$sysconfdir/rc.conf.d;;
*) initconfdir=/etc/default ;;
esac
AC_MSG_RESULT([$initconfdir])
AC_SUBST(initconfdir)
AC_MSG_CHECKING([whether initramfs-tools is available])
if test -d /usr/share/initramfs-tools ; then
RPM_DEFINE_INITRAMFS='--define "_initramfs 1"'
AC_MSG_RESULT([yes])
else
RPM_DEFINE_INITRAMFS=''
AC_MSG_RESULT([no])
fi
AC_SUBST(RPM_DEFINE_INITRAMFS)
])
dnl #
dnl # Default ZFS package configuration
dnl #
AC_DEFUN([ZFS_AC_PACKAGE], [
ZFS_AC_DEFAULT_PACKAGE
AS_IF([test x$VENDOR != xfreebsd], [
ZFS_AC_RPM
ZFS_AC_DPKG
ZFS_AC_ALIEN
])
])
diff --git a/sys/contrib/openzfs/config/zfs-meta.m4 b/sys/contrib/openzfs/config/zfs-meta.m4
index b3c1befaac5d..20064a0fb595 100644
--- a/sys/contrib/openzfs/config/zfs-meta.m4
+++ b/sys/contrib/openzfs/config/zfs-meta.m4
@@ -1,207 +1,207 @@
dnl #
dnl # DESCRIPTION:
dnl # Read metadata from the META file. When building from a git repository
dnl # the ZFS_META_RELEASE field will be overwritten if there is an annotated
dnl # tag matching the form ZFS_META_NAME-ZFS_META_VERSION-*. This allows
dnl # for working builds to be uniquely identified using the git commit hash.
dnl #
dnl # The META file format is as follows:
dnl # ^[ ]*KEY:[ \t]+VALUE$
dnl #
dnl # In other words:
dnl # - KEY is separated from VALUE by a colon and one or more spaces/tabs.
dnl # - KEY and VALUE are case sensitive.
dnl # - Leading spaces are ignored.
dnl # - First match wins for duplicate keys.
dnl #
dnl # A line can be commented out by preceding it with a '#' (or technically
dnl # any non-space character since that will prevent the regex from
dnl # matching).
dnl #
dnl # WARNING:
dnl # Placing a colon followed by a space or tab (i.e., ":[ \t]+") within the
dnl # VALUE will prematurely terminate the string since that sequence is
dnl # used as the awk field separator.
dnl #
dnl # KEYS:
dnl # The following META keys are recognized:
dnl # Name, Version, Release, Date, Author, LT_Current, LT_Revision, LT_Age
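dnl #
dnl # For example, a minimal META file might contain (values are
dnl # illustrative only):
dnl #
dnl #	Name:     zfs
dnl #	Version:  2.1.0
dnl #	Release:  1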
dnl #
dnl # Written by Chris Dunlap <cdunlap@llnl.gov>.
dnl # Modified by Brian Behlendorf <behlendorf1@llnl.gov>.
dnl #
AC_DEFUN([ZFS_AC_META], [
AH_BOTTOM([
#undef PACKAGE
#undef PACKAGE_BUGREPORT
#undef PACKAGE_NAME
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#undef STDC_HEADERS
#undef VERSION])
AC_PROG_AWK
AC_MSG_CHECKING([metadata])
META="$srcdir/META"
_zfs_ac_meta_type="none"
if test -f "$META"; then
_zfs_ac_meta_type="META file"
ZFS_META_NAME=_ZFS_AC_META_GETVAL([(Name|Project|Package)]);
if test -n "$ZFS_META_NAME"; then
AC_DEFINE_UNQUOTED([ZFS_META_NAME], ["$ZFS_META_NAME"],
[Define the project name.]
)
AC_SUBST([ZFS_META_NAME])
fi
ZFS_META_VERSION=_ZFS_AC_META_GETVAL([Version]);
if test -n "$ZFS_META_VERSION"; then
AC_DEFINE_UNQUOTED([ZFS_META_VERSION],
["$ZFS_META_VERSION"],
[Define the project version.])
AC_DEFINE_UNQUOTED([SPL_META_VERSION],
[ZFS_META_VERSION],
[Defined for legacy compatibility.])
AC_SUBST([ZFS_META_VERSION])
fi
ZFS_META_RELEASE=_ZFS_AC_META_GETVAL([Release]);
if test ! -f ".nogitrelease" && git rev-parse --git-dir > /dev/null 2>&1; then
_match="${ZFS_META_NAME}-${ZFS_META_VERSION}"
_alias=$(git describe --match=${_match} 2>/dev/null)
- _release=$(echo ${_alias}|cut -f3- -d'-'|sed 's/-/_/g')
+ _release=$(echo ${_alias}|sed "s/${ZFS_META_NAME}//"|cut -f3- -d'-'|tr - _)
if test -n "${_release}"; then
ZFS_META_RELEASE=${_release}
_zfs_ac_meta_type="git describe"
else
_match="${ZFS_META_NAME}-${ZFS_META_VERSION}-${ZFS_META_RELEASE}"
_alias=$(git describe --match=${_match} 2>/dev/null)
- _release=$(echo ${_alias}|cut -f3- -d'-'|sed 's/-/_/g')
+ _release=$(echo ${_alias}|sed "s/${ZFS_META_NAME}//"|cut -f3- -d'-'|tr - _)
if test -n "${_release}"; then
ZFS_META_RELEASE=${_release}
_zfs_ac_meta_type="git describe"
fi
fi
fi
if test -n "$ZFS_META_RELEASE"; then
AC_DEFINE_UNQUOTED([ZFS_META_RELEASE],
["$ZFS_META_RELEASE"],
[Define the project release.])
AC_DEFINE_UNQUOTED([SPL_META_RELEASE],
[ZFS_META_RELEASE],
[Defined for legacy compatibility.])
AC_SUBST([ZFS_META_RELEASE])
RELEASE="$ZFS_META_RELEASE"
AC_SUBST([RELEASE])
fi
ZFS_META_LICENSE=_ZFS_AC_META_GETVAL([License]);
if test -n "$ZFS_META_LICENSE"; then
AC_DEFINE_UNQUOTED([ZFS_META_LICENSE], ["$ZFS_META_LICENSE"],
[Define the project license.]
)
AC_SUBST([ZFS_META_LICENSE])
fi
if test -n "$ZFS_META_NAME" -a -n "$ZFS_META_VERSION"; then
ZFS_META_ALIAS="$ZFS_META_NAME-$ZFS_META_VERSION"
test -n "$ZFS_META_RELEASE" &&
ZFS_META_ALIAS="$ZFS_META_ALIAS-$ZFS_META_RELEASE"
AC_DEFINE_UNQUOTED([ZFS_META_ALIAS],
["$ZFS_META_ALIAS"],
[Define the project alias string.])
AC_DEFINE_UNQUOTED([SPL_META_ALIAS],
[ZFS_META_ALIAS],
[Defined for legacy compatibility.])
AC_SUBST([ZFS_META_ALIAS])
fi
ZFS_META_DATA=_ZFS_AC_META_GETVAL([Date]);
if test -n "$ZFS_META_DATA"; then
AC_DEFINE_UNQUOTED([ZFS_META_DATA], ["$ZFS_META_DATA"],
[Define the project release date.]
)
AC_SUBST([ZFS_META_DATA])
fi
ZFS_META_AUTHOR=_ZFS_AC_META_GETVAL([Author]);
if test -n "$ZFS_META_AUTHOR"; then
AC_DEFINE_UNQUOTED([ZFS_META_AUTHOR], ["$ZFS_META_AUTHOR"],
[Define the project author.]
)
AC_SUBST([ZFS_META_AUTHOR])
fi
ZFS_META_KVER_MIN=_ZFS_AC_META_GETVAL([Linux-Minimum]);
if test -n "$ZFS_META_KVER_MIN"; then
AC_DEFINE_UNQUOTED([ZFS_META_KVER_MIN],
["$ZFS_META_KVER_MIN"],
[Define the minimum compatible kernel version.]
)
AC_SUBST([ZFS_META_KVER_MIN])
fi
ZFS_META_KVER_MAX=_ZFS_AC_META_GETVAL([Linux-Maximum]);
if test -n "$ZFS_META_KVER_MAX"; then
AC_DEFINE_UNQUOTED([ZFS_META_KVER_MAX],
["$ZFS_META_KVER_MAX"],
[Define the maximum compatible kernel version.]
)
AC_SUBST([ZFS_META_KVER_MAX])
fi
m4_pattern_allow([^LT_(CURRENT|REVISION|AGE)$])
ZFS_META_LT_CURRENT=_ZFS_AC_META_GETVAL([LT_Current]);
ZFS_META_LT_REVISION=_ZFS_AC_META_GETVAL([LT_Revision]);
ZFS_META_LT_AGE=_ZFS_AC_META_GETVAL([LT_Age]);
if test -n "$ZFS_META_LT_CURRENT" \
-o -n "$ZFS_META_LT_REVISION" \
-o -n "$ZFS_META_LT_AGE"; then
test -n "$ZFS_META_LT_CURRENT" || ZFS_META_LT_CURRENT="0"
test -n "$ZFS_META_LT_REVISION" || ZFS_META_LT_REVISION="0"
test -n "$ZFS_META_LT_AGE" || ZFS_META_LT_AGE="0"
AC_DEFINE_UNQUOTED([ZFS_META_LT_CURRENT],
["$ZFS_META_LT_CURRENT"],
[Define the libtool library 'current'
version information.]
)
AC_DEFINE_UNQUOTED([ZFS_META_LT_REVISION],
["$ZFS_META_LT_REVISION"],
[Define the libtool library 'revision'
version information.]
)
AC_DEFINE_UNQUOTED([ZFS_META_LT_AGE], ["$ZFS_META_LT_AGE"],
[Define the libtool library 'age'
version information.]
)
AC_SUBST([ZFS_META_LT_CURRENT])
AC_SUBST([ZFS_META_LT_REVISION])
AC_SUBST([ZFS_META_LT_AGE])
fi
fi
AC_MSG_RESULT([$_zfs_ac_meta_type])
]
)
dnl # _ZFS_AC_META_GETVAL (KEY_NAME_OR_REGEX)
dnl #
dnl # Returns the META VALUE associated with the given KEY_NAME_OR_REGEX expr.
dnl #
dnl # Despite their resemblance to line noise,
dnl # the "@<:@" and "@:>@" constructs are quadrigraphs for "[" and "]".
dnl # <www.gnu.org/software/autoconf/manual/autoconf.html#Quadrigraphs>
dnl #
dnl # The "$[]1" and "$[]2" constructs prevent M4 parameter expansion
dnl # so a literal $1 and $2 will be passed to the resulting awk script,
dnl # whereas the "$1" will undergo M4 parameter expansion for the META key.
dnl #
AC_DEFUN([_ZFS_AC_META_GETVAL],
[`$AWK -F ':@<:@ \t@:>@+' '$[]1 ~ /^ *$1$/ { print $[]2; exit }' $META`]dnl
)
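dnl #
dnl # Illustrative expansion (not part of this change): a call such as
dnl # _ZFS_AC_META_GETVAL([Version]) expands to roughly
dnl #
dnl #	$AWK -F ':[ \t]+' '$1 ~ /^ *Version$/ { print $2; exit }' $META
dnl #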
diff --git a/sys/contrib/openzfs/configure.ac b/sys/contrib/openzfs/configure.ac
index ebc7b276a640..2671434afc72 100644
--- a/sys/contrib/openzfs/configure.ac
+++ b/sys/contrib/openzfs/configure.ac
@@ -1,418 +1,419 @@
/*
* This file is part of the ZFS Linux port.
*
* Copyright (c) 2009 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory
* Written by:
* Brian Behlendorf <behlendorf1@llnl.gov>,
* Herb Wartens <wartens2@llnl.gov>,
* Jim Garlick <garlick@llnl.gov>
* LLNL-CODE-403049
*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
AC_INIT(m4_esyscmd(grep ^Name: META | cut -d ':' -f 2 | tr -d ' \n'),
m4_esyscmd(grep ^Version: META | cut -d ':' -f 2 | tr -d ' \n'))
AC_LANG(C)
ZFS_AC_META
AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([config])
AC_CANONICAL_TARGET
AM_MAINTAINER_MODE
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
AM_INIT_AUTOMAKE([subdir-objects])
AC_CONFIG_HEADERS([zfs_config.h], [
(mv zfs_config.h zfs_config.h.tmp &&
awk -f ${ac_srcdir}/config/config.awk zfs_config.h.tmp >zfs_config.h &&
rm zfs_config.h.tmp) || exit 1])
LT_INIT
AC_PROG_INSTALL
AC_PROG_CC
AC_PROG_LN_S
PKG_PROG_PKG_CONFIG
AM_PROG_AS
AM_PROG_CC_C_O
AX_CODE_COVERAGE
_AM_PROG_TAR(pax)
ZFS_AC_LICENSE
ZFS_AC_CONFIG
ZFS_AC_PACKAGE
ZFS_AC_DEBUG
ZFS_AC_DEBUGINFO
ZFS_AC_DEBUG_KMEM
ZFS_AC_DEBUG_KMEM_TRACKING
ZFS_AC_DEBUG_INVARIANTS
AC_CONFIG_FILES([
Makefile
cmd/Makefile
cmd/arc_summary/Makefile
cmd/arcstat/Makefile
cmd/dbufstat/Makefile
cmd/fsck_zfs/Makefile
cmd/mount_zfs/Makefile
cmd/raidz_test/Makefile
cmd/vdev_id/Makefile
cmd/zdb/Makefile
cmd/zed/Makefile
cmd/zed/zed.d/Makefile
cmd/zfs/Makefile
cmd/zfs_ids_to_path/Makefile
cmd/zgenhostid/Makefile
cmd/zhack/Makefile
cmd/zinject/Makefile
cmd/zpool/Makefile
cmd/zstream/Makefile
cmd/ztest/Makefile
cmd/zvol_id/Makefile
cmd/zvol_wait/Makefile
cmd/zpool_influxdb/Makefile
contrib/Makefile
contrib/bash_completion.d/Makefile
contrib/bpftrace/Makefile
contrib/dracut/02zfsexpandknowledge/Makefile
contrib/dracut/90zfs/Makefile
contrib/dracut/Makefile
contrib/initramfs/Makefile
contrib/initramfs/conf.d/Makefile
contrib/initramfs/conf-hooks.d/Makefile
contrib/initramfs/hooks/Makefile
contrib/initramfs/scripts/Makefile
contrib/initramfs/scripts/local-top/Makefile
contrib/pam_zfs_key/Makefile
contrib/pyzfs/Makefile
contrib/pyzfs/setup.py
contrib/zcp/Makefile
etc/Makefile
etc/default/Makefile
etc/init.d/Makefile
etc/modules-load.d/Makefile
etc/sudoers.d/Makefile
etc/systemd/Makefile
etc/systemd/system-generators/Makefile
etc/systemd/system/Makefile
etc/zfs/Makefile
include/Makefile
include/os/Makefile
include/os/freebsd/Makefile
include/os/freebsd/linux/Makefile
include/os/freebsd/spl/Makefile
include/os/freebsd/spl/acl/Makefile
include/os/freebsd/spl/rpc/Makefile
include/os/freebsd/spl/sys/Makefile
include/os/freebsd/zfs/Makefile
include/os/freebsd/zfs/sys/Makefile
include/os/linux/Makefile
include/os/linux/kernel/Makefile
include/os/linux/kernel/linux/Makefile
include/os/linux/spl/Makefile
include/os/linux/spl/rpc/Makefile
include/os/linux/spl/sys/Makefile
include/os/linux/zfs/Makefile
include/os/linux/zfs/sys/Makefile
include/sys/Makefile
include/sys/crypto/Makefile
include/sys/fm/Makefile
include/sys/fm/fs/Makefile
include/sys/fs/Makefile
include/sys/lua/Makefile
include/sys/sysevent/Makefile
include/sys/zstd/Makefile
lib/Makefile
lib/libavl/Makefile
lib/libefi/Makefile
lib/libicp/Makefile
lib/libnvpair/Makefile
lib/libshare/Makefile
lib/libspl/Makefile
lib/libspl/include/Makefile
lib/libspl/include/ia32/Makefile
lib/libspl/include/ia32/sys/Makefile
lib/libspl/include/os/Makefile
lib/libspl/include/os/freebsd/Makefile
lib/libspl/include/os/freebsd/sys/Makefile
lib/libspl/include/os/linux/Makefile
lib/libspl/include/os/linux/sys/Makefile
lib/libspl/include/rpc/Makefile
lib/libspl/include/sys/Makefile
lib/libspl/include/sys/dktp/Makefile
lib/libspl/include/util/Makefile
lib/libtpool/Makefile
lib/libunicode/Makefile
lib/libuutil/Makefile
lib/libzfs/Makefile
lib/libzfs/libzfs.pc
lib/libzfsbootenv/Makefile
lib/libzfsbootenv/libzfsbootenv.pc
lib/libzfs_core/Makefile
lib/libzfs_core/libzfs_core.pc
lib/libzpool/Makefile
lib/libzstd/Makefile
lib/libzutil/Makefile
man/Makefile
module/Kbuild
module/Makefile
module/avl/Makefile
module/icp/Makefile
module/lua/Makefile
module/nvpair/Makefile
module/os/linux/spl/Makefile
module/os/linux/zfs/Makefile
module/spl/Makefile
module/unicode/Makefile
module/zcommon/Makefile
module/zfs/Makefile
module/zstd/Makefile
rpm/Makefile
rpm/generic/Makefile
rpm/generic/zfs-dkms.spec
rpm/generic/zfs-kmod.spec
rpm/generic/zfs.spec
rpm/redhat/Makefile
rpm/redhat/zfs-dkms.spec
rpm/redhat/zfs-kmod.spec
rpm/redhat/zfs.spec
scripts/Makefile
tests/Makefile
tests/runfiles/Makefile
tests/test-runner/Makefile
tests/test-runner/bin/Makefile
tests/test-runner/include/Makefile
tests/test-runner/man/Makefile
tests/zfs-tests/Makefile
tests/zfs-tests/callbacks/Makefile
tests/zfs-tests/cmd/Makefile
tests/zfs-tests/cmd/badsend/Makefile
tests/zfs-tests/cmd/btree_test/Makefile
tests/zfs-tests/cmd/chg_usr_exec/Makefile
tests/zfs-tests/cmd/devname2devid/Makefile
tests/zfs-tests/cmd/draid/Makefile
tests/zfs-tests/cmd/dir_rd_update/Makefile
tests/zfs-tests/cmd/file_check/Makefile
tests/zfs-tests/cmd/file_trunc/Makefile
tests/zfs-tests/cmd/file_write/Makefile
tests/zfs-tests/cmd/get_diff/Makefile
tests/zfs-tests/cmd/largest_file/Makefile
tests/zfs-tests/cmd/libzfs_input_check/Makefile
tests/zfs-tests/cmd/mkbusy/Makefile
tests/zfs-tests/cmd/mkfile/Makefile
tests/zfs-tests/cmd/mkfiles/Makefile
tests/zfs-tests/cmd/mktree/Makefile
tests/zfs-tests/cmd/mmap_exec/Makefile
tests/zfs-tests/cmd/mmap_libaio/Makefile
tests/zfs-tests/cmd/mmap_seek/Makefile
tests/zfs-tests/cmd/mmapwrite/Makefile
tests/zfs-tests/cmd/nvlist_to_lua/Makefile
tests/zfs-tests/cmd/randfree_file/Makefile
tests/zfs-tests/cmd/randwritecomp/Makefile
tests/zfs-tests/cmd/readmmap/Makefile
tests/zfs-tests/cmd/rename_dir/Makefile
tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile
tests/zfs-tests/cmd/send_doall/Makefile
tests/zfs-tests/cmd/stride_dd/Makefile
tests/zfs-tests/cmd/threadsappend/Makefile
tests/zfs-tests/cmd/user_ns_exec/Makefile
tests/zfs-tests/cmd/xattrtest/Makefile
tests/zfs-tests/include/Makefile
tests/zfs-tests/tests/Makefile
tests/zfs-tests/tests/functional/Makefile
tests/zfs-tests/tests/functional/acl/Makefile
tests/zfs-tests/tests/functional/acl/off/Makefile
tests/zfs-tests/tests/functional/acl/posix/Makefile
tests/zfs-tests/tests/functional/acl/posix-sa/Makefile
tests/zfs-tests/tests/functional/alloc_class/Makefile
tests/zfs-tests/tests/functional/arc/Makefile
tests/zfs-tests/tests/functional/atime/Makefile
tests/zfs-tests/tests/functional/bootfs/Makefile
tests/zfs-tests/tests/functional/btree/Makefile
tests/zfs-tests/tests/functional/cache/Makefile
tests/zfs-tests/tests/functional/cachefile/Makefile
tests/zfs-tests/tests/functional/casenorm/Makefile
tests/zfs-tests/tests/functional/channel_program/Makefile
tests/zfs-tests/tests/functional/channel_program/lua_core/Makefile
tests/zfs-tests/tests/functional/channel_program/synctask_core/Makefile
tests/zfs-tests/tests/functional/chattr/Makefile
tests/zfs-tests/tests/functional/checksum/Makefile
tests/zfs-tests/tests/functional/clean_mirror/Makefile
tests/zfs-tests/tests/functional/cli_root/Makefile
tests/zfs-tests/tests/functional/cli_root/zdb/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_change-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_clone/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_copies/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_create/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_diff/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_get/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_ids_to_path/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_inherit/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_jail/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_load-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_mount/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_program/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_promote/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_property/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_rename/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_reservation/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_rollback/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_set/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_share/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_sysfs/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_unload-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_unmount/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_unshare/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_wait/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_attach/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_clear/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_destroy/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_detach/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_events/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_expand/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_export/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_history/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_import/blockfiles/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_initialize/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_remove/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_replace/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_resilver/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_set/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_split/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_status/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_sync/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_trim/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/blockfiles/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_wait/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/Makefile
tests/zfs-tests/tests/functional/cli_user/Makefile
tests/zfs-tests/tests/functional/cli_user/misc/Makefile
tests/zfs-tests/tests/functional/cli_user/zfs_list/Makefile
tests/zfs-tests/tests/functional/cli_user/zpool_iostat/Makefile
tests/zfs-tests/tests/functional/cli_user/zpool_list/Makefile
tests/zfs-tests/tests/functional/cli_user/zpool_status/Makefile
tests/zfs-tests/tests/functional/compression/Makefile
tests/zfs-tests/tests/functional/cp_files/Makefile
tests/zfs-tests/tests/functional/crtime/Makefile
tests/zfs-tests/tests/functional/ctime/Makefile
tests/zfs-tests/tests/functional/deadman/Makefile
tests/zfs-tests/tests/functional/delegate/Makefile
tests/zfs-tests/tests/functional/devices/Makefile
tests/zfs-tests/tests/functional/events/Makefile
tests/zfs-tests/tests/functional/exec/Makefile
tests/zfs-tests/tests/functional/fallocate/Makefile
tests/zfs-tests/tests/functional/fault/Makefile
tests/zfs-tests/tests/functional/features/Makefile
tests/zfs-tests/tests/functional/features/async_destroy/Makefile
tests/zfs-tests/tests/functional/features/large_dnode/Makefile
tests/zfs-tests/tests/functional/grow/Makefile
tests/zfs-tests/tests/functional/history/Makefile
tests/zfs-tests/tests/functional/hkdf/Makefile
tests/zfs-tests/tests/functional/inheritance/Makefile
tests/zfs-tests/tests/functional/inuse/Makefile
tests/zfs-tests/tests/functional/io/Makefile
tests/zfs-tests/tests/functional/l2arc/Makefile
tests/zfs-tests/tests/functional/large_files/Makefile
tests/zfs-tests/tests/functional/largest_pool/Makefile
tests/zfs-tests/tests/functional/libzfs/Makefile
tests/zfs-tests/tests/functional/limits/Makefile
tests/zfs-tests/tests/functional/link_count/Makefile
tests/zfs-tests/tests/functional/log_spacemap/Makefile
tests/zfs-tests/tests/functional/migration/Makefile
tests/zfs-tests/tests/functional/mmap/Makefile
tests/zfs-tests/tests/functional/mmp/Makefile
tests/zfs-tests/tests/functional/mount/Makefile
tests/zfs-tests/tests/functional/mv_files/Makefile
tests/zfs-tests/tests/functional/nestedfs/Makefile
tests/zfs-tests/tests/functional/no_space/Makefile
tests/zfs-tests/tests/functional/nopwrite/Makefile
tests/zfs-tests/tests/functional/online_offline/Makefile
tests/zfs-tests/tests/functional/pam/Makefile
tests/zfs-tests/tests/functional/pool_checkpoint/Makefile
tests/zfs-tests/tests/functional/pool_names/Makefile
tests/zfs-tests/tests/functional/poolversion/Makefile
tests/zfs-tests/tests/functional/privilege/Makefile
tests/zfs-tests/tests/functional/procfs/Makefile
tests/zfs-tests/tests/functional/projectquota/Makefile
tests/zfs-tests/tests/functional/pyzfs/Makefile
tests/zfs-tests/tests/functional/quota/Makefile
tests/zfs-tests/tests/functional/raidz/Makefile
tests/zfs-tests/tests/functional/redacted_send/Makefile
tests/zfs-tests/tests/functional/redundancy/Makefile
tests/zfs-tests/tests/functional/refquota/Makefile
tests/zfs-tests/tests/functional/refreserv/Makefile
tests/zfs-tests/tests/functional/removal/Makefile
tests/zfs-tests/tests/functional/rename_dirs/Makefile
tests/zfs-tests/tests/functional/replacement/Makefile
tests/zfs-tests/tests/functional/reservation/Makefile
tests/zfs-tests/tests/functional/rootpool/Makefile
tests/zfs-tests/tests/functional/rsend/Makefile
tests/zfs-tests/tests/functional/scrub_mirror/Makefile
+ tests/zfs-tests/tests/functional/simd/Makefile
tests/zfs-tests/tests/functional/slog/Makefile
tests/zfs-tests/tests/functional/snapshot/Makefile
tests/zfs-tests/tests/functional/snapused/Makefile
tests/zfs-tests/tests/functional/sparse/Makefile
tests/zfs-tests/tests/functional/suid/Makefile
tests/zfs-tests/tests/functional/threadsappend/Makefile
tests/zfs-tests/tests/functional/tmpfile/Makefile
tests/zfs-tests/tests/functional/trim/Makefile
tests/zfs-tests/tests/functional/truncate/Makefile
tests/zfs-tests/tests/functional/upgrade/Makefile
tests/zfs-tests/tests/functional/user_namespace/Makefile
tests/zfs-tests/tests/functional/userquota/Makefile
tests/zfs-tests/tests/functional/vdev_zaps/Makefile
tests/zfs-tests/tests/functional/write_dirs/Makefile
tests/zfs-tests/tests/functional/xattr/Makefile
tests/zfs-tests/tests/functional/zpool_influxdb/Makefile
tests/zfs-tests/tests/functional/zvol/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_cli/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_swap/Makefile
tests/zfs-tests/tests/perf/Makefile
tests/zfs-tests/tests/perf/fio/Makefile
tests/zfs-tests/tests/perf/regression/Makefile
tests/zfs-tests/tests/perf/scripts/Makefile
tests/zfs-tests/tests/stress/Makefile
udev/Makefile
udev/rules.d/Makefile
zfs.release
])
AC_OUTPUT
diff --git a/sys/contrib/openzfs/contrib/bpftrace/zfs-trace.sh b/sys/contrib/openzfs/contrib/bpftrace/zfs-trace.sh
index 54f66f3ba3fd..0165335c474b 100755
--- a/sys/contrib/openzfs/contrib/bpftrace/zfs-trace.sh
+++ b/sys/contrib/openzfs/contrib/bpftrace/zfs-trace.sh
@@ -1,10 +1,11 @@
#!/bin/sh
-ZVER=$(cut -f 1 -d '-' /sys/module/zfs/version)
+read -r ZVER < /sys/module/zfs/version
+ZVER="${ZVER%%-*}"
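# e.g. a module version string such as "2.1.99-1" (illustrative) leaves ZVER="2.1.99"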
KVER=$(uname -r)
exec bpftrace \
--include "/usr/src/zfs-$ZVER/$KVER/zfs_config.h" \
-I "/usr/src/zfs-$ZVER/include" \
-I "/usr/src/zfs-$ZVER/include/spl" \
"$@"
diff --git a/sys/contrib/openzfs/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in b/sys/contrib/openzfs/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in
index d21ab74cc0d0..a161fbf6f113 100755
--- a/sys/contrib/openzfs/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in
@@ -1,126 +1,122 @@
#!/usr/bin/env bash
get_devtype() {
local typ
- typ=$(udevadm info --query=property --name="$1" | grep "^ID_FS_TYPE=" | sed 's|^ID_FS_TYPE=||')
- if [ "$typ" = "" ] ; then
+ typ=$(udevadm info --query=property --name="$1" | sed -n 's|^ID_FS_TYPE=||p')
+ if [ -z "$typ" ] ; then
typ=$(blkid -c /dev/null "$1" -o value -s TYPE)
fi
echo "$typ"
}
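The sed -n 's|^ID_FS_TYPE=||p' form filters and strips the prefix in one pass, replacing the old grep-plus-sed pipeline; a stand-alone sketch with invented udevadm-style output:
    printf 'DEVNAME=/dev/sda1\nID_FS_TYPE=ext4\n' | sed -n 's|^ID_FS_TYPE=||p'
    # prints: ext4 (-n suppresses non-matching lines; the p flag prints only the substituted one)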
get_pool_devices() {
# also present in 99zfssystemd
local poolconfigtemp
local poolconfigoutput
local pooldev
local resolved
poolconfigtemp="$(mktemp)"
if ! @sbindir@/zpool list -v -H -P "$1" > "$poolconfigtemp" 2>&1 ; then
poolconfigoutput="$(cat "$poolconfigtemp")"
dinfo "zfsexpandknowledge: pool $1 cannot be listed: $poolconfigoutput"
else
awk -F '\t' '/\t\/dev/ { print $2 }' "$poolconfigtemp" | \
while read -r pooldev ; do
if [ -e "$pooldev" ] ; then
resolved="$(readlink -f "$pooldev")"
dinfo "zfsexpandknowledge: pool $1 has device $pooldev (which resolves to $resolved)"
echo "$resolved"
fi
done
fi
rm -f "$poolconfigtemp"
}
find_zfs_block_devices() {
local dev
local mp
local fstype
- local pool
local _
numfields="$(awk '{print NF; exit}' /proc/self/mountinfo)"
if [ "$numfields" = "10" ] ; then
fields="_ _ _ _ mp _ _ fstype dev _"
else
fields="_ _ _ _ mp _ _ _ fstype dev _"
fi
# shellcheck disable=SC2086
while read -r ${fields?} ; do
[ "$fstype" = "zfs" ] || continue
- if [ "$mp" = "$1" ]; then
- pool=$(echo "$dev" | cut -d / -f 1)
- get_pool_devices "$pool"
- fi
+ [ "$mp" = "$1" ] && get_pool_devices "${dev%%/*}"
done < /proc/self/mountinfo
}
array_contains () {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
return 1
}
check() {
local mp
local dev
local blockdevs
local fstype
local majmin
local _depdev
local _depdevname
local _depdevtype
# shellcheck disable=SC2154
if [ -n "$hostonly" ]; then
for mp in \
"/" \
"/etc" \
"/bin" \
"/sbin" \
"/lib" \
"/lib64" \
"/usr" \
"/usr/bin" \
"/usr/sbin" \
"/usr/lib" \
"/usr/lib64" \
"/boot";
do
mp=$(readlink -f "$mp")
mountpoint "$mp" >/dev/null 2>&1 || continue
blockdevs=$(find_zfs_block_devices "$mp")
if [ -z "$blockdevs" ] ; then continue ; fi
dinfo "zfsexpandknowledge: block devices backing ZFS dataset $mp: ${blockdevs//$'\n'/ }"
for dev in $blockdevs
do
array_contains "$dev" "${host_devs[@]}" || host_devs+=("$dev")
fstype=$(get_devtype "$dev")
host_fs_types["$dev"]="$fstype"
majmin=$(get_maj_min "$dev")
if [ -d "/sys/dev/block/$majmin/slaves" ] ; then
for _depdev in "/sys/dev/block/$majmin/slaves"/*; do
- [[ -f $_depdev/dev ]] || continue
- _depdev=/dev/$(basename "$_depdev")
- _depdevname=$(udevadm info --query=property --name="$_depdev" | grep "^DEVNAME=" | sed 's|^DEVNAME=||')
+ [ -f "$_depdev/dev" ] || continue
+ _depdev="/dev/${_depdev##*/}"
+ _depdevname=$(udevadm info --query=property --name="$_depdev" | sed -n 's|^DEVNAME=||p')
_depdevtype=$(get_devtype "$_depdevname")
dinfo "zfsexpandknowledge: underlying block device backing ZFS dataset $mp: ${_depdevname//$'\n'/ }"
array_contains "$_depdevname" "${host_devs[@]}" || host_devs+=("$_depdevname")
host_fs_types["$_depdevname"]="$_depdevtype"
done
fi
done
done
for a in "${host_devs[@]}"
do
dinfo "zfsexpandknowledge: host device $a"
done
for a in "${!host_fs_types[@]}"
do
dinfo "zfsexpandknowledge: device $a of type ${host_fs_types[$a]}"
done
fi
return 1
}
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
index 213b48a25f5a..fbf32b6588a9 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/module-setup.sh.in
@@ -1,137 +1,143 @@
#!/usr/bin/env bash
# shellcheck disable=SC2154
check() {
# We depend on udev-rules being loaded
[ "${1}" = "-d" ] && return 0
# Verify the zfs tool chain
for tool in "@sbindir@/zgenhostid" "@sbindir@/zpool" "@sbindir@/zfs" "@mounthelperdir@/mount.zfs" ; do
test -x "$tool" || return 1
done
return 0
}
depends() {
echo udev-rules
return 0
}
installkernel() {
instmods zfs
instmods zcommon
instmods znvpair
instmods zavl
instmods zunicode
instmods zlua
instmods icp
instmods spl
instmods zlib_deflate
instmods zlib_inflate
}
install() {
inst_rules @udevruledir@/90-zfs.rules
inst_rules @udevruledir@/69-vdev.rules
inst_rules @udevruledir@/60-zvol.rules
dracut_install hostid
dracut_install grep
dracut_install @sbindir@/zgenhostid
dracut_install @sbindir@/zfs
dracut_install @sbindir@/zpool
# Workaround for https://github.com/openzfs/zfs/issues/4749 by
# ensuring libgcc_s.so(.1) is included
if ldd @sbindir@/zpool | grep -qF 'libgcc_s.so'; then
# Dracut will have already tracked and included it
:;
elif command -v gcc-config >/dev/null 2>&1; then
# On systems with gcc-config (Gentoo, Funtoo, etc.):
# Use the current profile to resolve the appropriate path
s="$(gcc-config -c)"
dracut_install "/usr/lib/gcc/${s%-*}/${s##*-}/libgcc_s.so"*
elif [ "$(echo /usr/lib/libgcc_s.so*)" != "/usr/lib/libgcc_s.so*" ]; then
# Try a simple path first
dracut_install /usr/lib/libgcc_s.so*
elif [ "$(echo /lib*/libgcc_s.so*)" != "/lib*/libgcc_s.so*" ]; then
# SUSE
dracut_install /lib*/libgcc_s.so*
else
# Fallback: Guess the path and include all matches
dracut_install /usr/lib*/gcc/**/libgcc_s.so*
fi
+ # shellcheck disable=SC2050
+ if [ @LIBFETCH_DYNAMIC@ -gt 0 ]; then
+ for d in $libdirs; do
+ [ -e "$d/@LIBFETCH_SONAME@" ] && dracut_install "$d/@LIBFETCH_SONAME@"
+ done
+ fi
dracut_install @mounthelperdir@/mount.zfs
dracut_install @udevdir@/vdev_id
dracut_install awk
- dracut_install basename
dracut_install cut
+ dracut_install tr
dracut_install head
dracut_install @udevdir@/zvol_id
inst_hook cmdline 95 "${moddir}/parse-zfs.sh"
if [ -n "$systemdutildir" ] ; then
inst_script "${moddir}/zfs-generator.sh" "$systemdutildir"/system-generators/dracut-zfs-generator
fi
inst_hook pre-mount 90 "${moddir}/zfs-load-key.sh"
inst_hook mount 98 "${moddir}/mount-zfs.sh"
inst_hook cleanup 99 "${moddir}/zfs-needshutdown.sh"
inst_hook shutdown 20 "${moddir}/export-zfs.sh"
inst_simple "${moddir}/zfs-lib.sh" "/lib/dracut-zfs-lib.sh"
if [ -e @sysconfdir@/zfs/zpool.cache ]; then
inst @sysconfdir@/zfs/zpool.cache
type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/zfs/zpool.cache
fi
if [ -e @sysconfdir@/zfs/vdev_id.conf ]; then
inst @sysconfdir@/zfs/vdev_id.conf
type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/zfs/vdev_id.conf
fi
# Synchronize initramfs and system hostid
if [ -f @sysconfdir@/hostid ]; then
inst @sysconfdir@/hostid
type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/hostid
elif HOSTID="$(hostid 2>/dev/null)" && [ "${HOSTID}" != "00000000" ]; then
zgenhostid -o "${initdir}@sysconfdir@/hostid" "${HOSTID}"
type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/hostid
fi
if dracut_module_included "systemd"; then
mkdir -p "${initdir}/$systemdsystemunitdir/zfs-import.target.wants"
for _service in "zfs-import-scan.service" "zfs-import-cache.service" ; do
dracut_install "@systemdunitdir@/$_service"
if ! [ -L "${initdir}/$systemdsystemunitdir/zfs-import.target.wants/$_service" ]; then
ln -sf ../$_service "${initdir}/$systemdsystemunitdir/zfs-import.target.wants/$_service"
type mark_hostonly >/dev/null 2>&1 && mark_hostonly "@systemdunitdir@/$_service"
fi
done
inst "${moddir}"/zfs-env-bootfs.service "${systemdsystemunitdir}"/zfs-env-bootfs.service
ln -s ../zfs-env-bootfs.service "${initdir}/${systemdsystemunitdir}/zfs-import.target.wants"/zfs-env-bootfs.service
type mark_hostonly >/dev/null 2>&1 && mark_hostonly @systemdunitdir@/zfs-env-bootfs.service
dracut_install systemd-ask-password
dracut_install systemd-tty-ask-password-agent
mkdir -p "${initdir}/$systemdsystemunitdir/initrd.target.wants"
dracut_install @systemdunitdir@/zfs-import.target
if ! [ -L "${initdir}/$systemdsystemunitdir/initrd.target.wants"/zfs-import.target ]; then
ln -s ../zfs-import.target "${initdir}/$systemdsystemunitdir/initrd.target.wants"/zfs-import.target
type mark_hostonly >/dev/null 2>&1 && mark_hostonly @systemdunitdir@/zfs-import.target
fi
for _service in zfs-snapshot-bootfs.service zfs-rollback-bootfs.service ; do
inst "${moddir}/$_service" "${systemdsystemunitdir}/$_service"
if ! [ -L "${initdir}/$systemdsystemunitdir/initrd.target.wants/$_service" ]; then
ln -s "../$_service" "${initdir}/$systemdsystemunitdir/initrd.target.wants/$_service"
fi
done
# There isn't a pkg-config variable for this,
# and dracut doesn't automatically resolve anything this'd be next to
local systemdsystemenvironmentgeneratordir
systemdsystemenvironmentgeneratordir="$(pkg-config --variable=prefix systemd || echo "/usr")/lib/systemd/system-environment-generators"
mkdir -p "${initdir}/${systemdsystemenvironmentgeneratordir}"
inst "${moddir}"/import-opts-generator.sh "${systemdsystemenvironmentgeneratordir}"/zfs-import-opts.sh
fi
}
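The @LIBFETCH_DYNAMIC@ and @LIBFETCH_SONAME@ tokens in the new block above are substituted at build time, so the condition is a constant in the generated script (hence the SC2050 suppression). A rough sketch of what the expanded loop could look like; the soname and directory list are assumptions, and echo stands in for dracut_install:
    # hypothetical post-configure expansion; the values below are assumptions
    LIBFETCH_SONAME="libcurl.so.4"
    for d in /lib /lib64 /usr/lib /usr/lib64; do
        [ -e "$d/$LIBFETCH_SONAME" ] && echo "would install $d/$LIBFETCH_SONAME"
    done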
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in
index fe786a880699..0f92f5c80cce 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/parse-zfs.sh.in
@@ -1,63 +1,63 @@
#!/bin/sh
# shellcheck disable=SC2034,SC2154
. /lib/dracut-lib.sh
# Let the command line override our host id.
spl_hostid=$(getarg spl_hostid=)
if [ -n "${spl_hostid}" ] ; then
info "ZFS: Using hostid from command line: ${spl_hostid}"
zgenhostid -f "${spl_hostid}"
elif [ -f "/etc/hostid" ] ; then
info "ZFS: Using hostid from /etc/hostid: $(hostid)"
else
warn "ZFS: No hostid found on kernel command line or /etc/hostid."
warn "ZFS: Pools may not import correctly."
fi
wait_for_zfs=0
case "${root}" in
""|zfs|zfs:)
# We'll take root unset, root=zfs, or root=zfs:
# No root set, so we want to read the bootfs attribute. We
# can't do that until udev settles so we'll set dummy values
# and hope for the best later on.
root="zfs:AUTO"
rootok=1
wait_for_zfs=1
info "ZFS: Enabling autodetection of bootfs after udev settles."
;;
ZFS=*|zfs:*|FILESYSTEM=*)
# root is explicit ZFS root. Parse it now. We can handle
# a root=... param in any of the following formats:
# root=ZFS=rpool/ROOT
# root=zfs:rpool/ROOT
# root=zfs:FILESYSTEM=rpool/ROOT
# root=FILESYSTEM=rpool/ROOT
# root=ZFS=pool+with+space/ROOT+WITH+SPACE (translates to root=ZFS=pool with space/ROOT WITH SPACE)
# Strip down to just the pool/fs
root="${root#zfs:}"
root="${root#FILESYSTEM=}"
root="zfs:${root#ZFS=}"
# switch + with spaces because kernel cmdline does not allow us to quote parameters
- root=$(printf '%s\n' "$root" | sed "s/+/ /g")
+ root=$(echo "$root" | tr '+' ' ')
rootok=1
wait_for_zfs=1
info "ZFS: Set ${root} as bootfs."
;;
esac
# Make sure Dracut is happy that we have a root and will wait for ZFS
# modules to settle before mounting.
if [ ${wait_for_zfs} -eq 1 ]; then
ln -s /dev/null /dev/root 2>/dev/null
initqueuedir="${hookdir}/initqueue/finished"
test -d "${initqueuedir}" || {
initqueuedir="${hookdir}/initqueue-finished"
}
echo '[ -e /dev/zfs ]' > "${initqueuedir}/zfs.sh"
fi
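The '+'-to-space translation above only matters when pool or dataset names contain spaces; a stand-alone sketch of the new tr form, with an invented name:
    root='zfs:tank/ROOT+WITH+SPACE'
    echo "$root" | tr '+' ' '       # prints: zfs:tank/ROOT WITH SPACE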
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in
index 2bc43482c187..e143cb5ec1ed 100644
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-env-bootfs.service.in
@@ -1,14 +1,14 @@
[Unit]
Description=Set BOOTFS environment for dracut
Documentation=man:zpool(8)
DefaultDependencies=no
After=zfs-import-cache.service
After=zfs-import-scan.service
Before=zfs-import.target
[Service]
Type=oneshot
-ExecStart=/bin/sh -c "systemctl set-environment BOOTFS=$(@sbindir@/zpool list -H -o bootfs | grep -m1 -v '^-$')"
+ExecStart=/bin/sh -c "exec systemctl set-environment BOOTFS=$(@sbindir@/zpool list -H -o bootfs | grep -m1 -v '^-$')"
[Install]
WantedBy=zfs-import.target
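Adding exec makes the wrapper /bin/sh replace itself with the command, so the unit's main process is systemctl rather than a shell that merely waits on it. A toy demonstration of the replacement (ps stands in for systemctl here):
    sh -c 'echo "shell pid: $$"; exec ps -o pid=,comm= -p $$'
    # both lines show the same PID: after exec, ps runs as the former shell process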
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in
index b57c64c688b1..e50b9530c4f0 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-generator.sh.in
@@ -1,119 +1,119 @@
#!/bin/sh
# shellcheck disable=SC2016,SC1004
grep -wq debug /proc/cmdline && debug=1
[ -n "$debug" ] && echo "zfs-generator: starting" >> /dev/kmsg
GENERATOR_DIR="$1"
[ -n "$GENERATOR_DIR" ] || {
echo "zfs-generator: no generator directory specified, exiting" >> /dev/kmsg
exit 1
}
[ -f /lib/dracut-lib.sh ] && dracutlib=/lib/dracut-lib.sh
[ -f /usr/lib/dracut/modules.d/99base/dracut-lib.sh ] && dracutlib=/usr/lib/dracut/modules.d/99base/dracut-lib.sh
command -v getarg >/dev/null 2>&1 || {
[ -n "$debug" ] && echo "zfs-generator: loading Dracut library from $dracutlib" >> /dev/kmsg
. "$dracutlib"
}
. /lib/dracut-zfs-lib.sh
[ -z "$root" ] && root=$(getarg root=)
[ -z "$rootfstype" ] && rootfstype=$(getarg rootfstype=)
[ -z "$rootflags" ] && rootflags=$(getarg rootflags=)
# If root is not ZFS= or zfs: or rootfstype is not zfs
# then we are not supposed to handle it.
[ "${root##zfs:}" = "${root}" ] &&
[ "${root##ZFS=}" = "${root}" ] &&
[ "$rootfstype" != "zfs" ] &&
exit 0
case ",${rootflags}," in
*,zfsutil,*) ;;
,,) rootflags=zfsutil ;;
*) rootflags="zfsutil,${rootflags}" ;;
esac
if [ "${root}" != "zfs:AUTO" ]; then
root="${root##zfs:}"
root="${root##ZFS=}"
fi
[ -n "$debug" ] && echo "zfs-generator: writing extension for sysroot.mount to $GENERATOR_DIR/sysroot.mount.d/zfs-enhancement.conf" >> /dev/kmsg
mkdir -p "$GENERATOR_DIR"/sysroot.mount.d "$GENERATOR_DIR"/initrd-root-fs.target.requires "$GENERATOR_DIR"/dracut-pre-mount.service.d
{
echo "[Unit]"
echo "Before=initrd-root-fs.target"
echo "After=zfs-import.target"
echo
echo "[Mount]"
if [ "${root}" = "zfs:AUTO" ]; then
echo "PassEnvironment=BOOTFS"
echo 'What=${BOOTFS}'
else
echo "What=${root}"
fi
echo "Type=zfs"
echo "Options=${rootflags}"
} > "$GENERATOR_DIR"/sysroot.mount.d/zfs-enhancement.conf
ln -fs ../sysroot.mount "$GENERATOR_DIR"/initrd-root-fs.target.requires/sysroot.mount
if [ "${root}" = "zfs:AUTO" ]; then
{
echo "[Unit]"
echo "Before=initrd-root-fs.target"
echo "After=sysroot.mount"
echo "DefaultDependencies=no"
echo
echo "[Service]"
echo "Type=oneshot"
echo "PassEnvironment=BOOTFS"
echo "ExecStart=/bin/sh -c '" ' \
. /lib/dracut-zfs-lib.sh; \
_zfs_nonroot_necessities_cb() { \
zfs mount | grep -m1 -q "^$1 " && return 0; \
echo "Mounting $1 on /sysroot$2"; \
mount -o zfsutil -t zfs "$1" "/sysroot$2"; \
}; \
for_relevant_root_children "${BOOTFS}" _zfs_nonroot_necessities_cb;' \
"'"
} > "$GENERATOR_DIR"/zfs-nonroot-necessities.service
ln -fs ../zfs-nonroot-necessities.service "$GENERATOR_DIR"/initrd-root-fs.target.requires/zfs-nonroot-necessities.service
else
# We can solve this statically at generation time, so do!
_zfs_generator_cb() {
dset="${1}"
mpnt="${2}"
- unit="sysroot$(echo "$mpnt" | sed 's;/;-;g').mount"
+ unit="sysroot$(echo "$mpnt" | tr '/' '-').mount"
{
echo "[Unit]"
echo "Before=initrd-root-fs.target"
echo "After=sysroot.mount"
echo
echo "[Mount]"
echo "Where=/sysroot${mpnt}"
echo "What=${dset}"
echo "Type=zfs"
echo "Options=zfsutil"
} > "$GENERATOR_DIR/${unit}"
ln -fs ../"${unit}" "$GENERATOR_DIR"/initrd-root-fs.target.requires/"${unit}"
}
for_relevant_root_children "${root}" _zfs_generator_cb
fi
{
echo "[Unit]"
echo "After=zfs-import.target"
} > "$GENERATOR_DIR"/dracut-pre-mount.service.d/zfs-enhancement.conf
[ -n "$debug" ] && echo "zfs-generator: finished" >> /dev/kmsg
exit 0
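The generator derives a mount unit name by swapping '/' for '-' in the mountpoint; a sketch with an invented mountpoint (systemd's full escaping rules are richer, this mirrors only what the script does):
    mpnt="/var/log"                                    # invented example mountpoint
    echo "sysroot$(echo "$mpnt" | tr '/' '-').mount"   # prints: sysroot-var-log.mount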
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in
index 5eb28fee3fe3..c974b3d9ec4c 100755
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-load-key.sh.in
@@ -1,56 +1,73 @@
#!/bin/sh
# shellcheck disable=SC2154
# only run this on systemd systems, we handle the decrypt in mount-zfs.sh in the mount hook otherwise
[ -e /bin/systemctl ] || [ -e /usr/bin/systemctl ] || return 0
# This script only gets executed on systemd systems, see mount-zfs.sh for non-systemd systems
# import the libs now that we know the pool imported
[ -f /lib/dracut-lib.sh ] && dracutlib=/lib/dracut-lib.sh
[ -f /usr/lib/dracut/modules.d/99base/dracut-lib.sh ] && dracutlib=/usr/lib/dracut/modules.d/99base/dracut-lib.sh
# shellcheck source=./lib-zfs.sh.in
. "$dracutlib"
# load the kernel command line vars
[ -z "$root" ] && root="$(getarg root=)"
# If root is not ZFS= or zfs: or rootfstype is not zfs then we are not supposed to handle it.
[ "${root##zfs:}" = "${root}" ] && [ "${root##ZFS=}" = "${root}" ] && [ "$rootfstype" != "zfs" ] && exit 0
# There is a race between the zpool import and the pre-mount hooks, so we wait for a pool to be imported
while [ "$(zpool list -H)" = "" ]; do
systemctl is-failed --quiet zfs-import-cache.service zfs-import-scan.service && exit 1
sleep 0.1s
done
# run this after import as zfs-import-cache/scan service is confirmed good
# we do not overwrite the ${root} variable, but create a new one, BOOTFS, to hold the dataset
if [ "${root}" = "zfs:AUTO" ] ; then
BOOTFS="$(zpool list -H -o bootfs | awk '$1 != "-" {print; exit}')"
else
BOOTFS="${root##zfs:}"
BOOTFS="${BOOTFS##ZFS=}"
fi
# if pool encryption is active and the zfs command understands '-o encryption'
if [ "$(zpool list -H -o feature@encryption "${BOOTFS%%/*}")" = 'active' ]; then
# if the root dataset has encryption enabled
ENCRYPTIONROOT="$(zfs get -H -o value encryptionroot "${BOOTFS}")"
if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
KEYSTATUS="$(zfs get -H -o value keystatus "${ENCRYPTIONROOT}")"
# continue only if the key needs to be loaded
[ "$KEYSTATUS" = "unavailable" ] || exit 0
KEYLOCATION="$(zfs get -H -o value keylocation "${ENCRYPTIONROOT}")"
- if ! [ "${KEYLOCATION}" = "prompt" ]; then
- zfs load-key "${ENCRYPTIONROOT}"
- else
- # decrypt them
- TRY_COUNT=5
- while [ $TRY_COUNT -gt 0 ]; do
- systemd-ask-password "Encrypted ZFS password for ${BOOTFS}" --no-tty | zfs load-key "${ENCRYPTIONROOT}" && break
- TRY_COUNT=$((TRY_COUNT - 1))
- done
- fi
+ case "${KEYLOCATION%%://*}" in
+ prompt)
+ for _ in 1 2 3; do
+ systemd-ask-password --no-tty "Encrypted ZFS password for ${BOOTFS}" | zfs load-key "${ENCRYPTIONROOT}" && break
+ done
+ ;;
+ http*)
+ systemctl start network-online.target
+ zfs load-key "${ENCRYPTIONROOT}"
+ ;;
+ file)
+ KEYFILE="${KEYLOCATION#file://}"
+ [ -r "${KEYFILE}" ] || udevadm settle
+ [ -r "${KEYFILE}" ] || {
+ info "Waiting for key ${KEYFILE} for ${ENCRYPTIONROOT}..."
+ for _ in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
+ sleep 0.5s
+ [ -r "${KEYFILE}" ] && break
+ done
+ }
+ [ -r "${KEYFILE}" ] || warn "Key ${KEYFILE} for ${ENCRYPTIONROOT} hasn't appeared. Trying anyway."
+ zfs load-key "${ENCRYPTIONROOT}"
+ ;;
+ *)
+ zfs load-key "${ENCRYPTIONROOT}"
+ ;;
+ esac
fi
fi
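The new case statement keys on the keylocation scheme, which ${KEYLOCATION%%://*} isolates by cutting at the first '://'. A stand-alone sketch of the dispatch, with invented key locations:
    for KEYLOCATION in prompt file:///etc/zfs/key https://keys.example/pool.key; do
        case "${KEYLOCATION%%://*}" in
            prompt) echo "$KEYLOCATION -> interactive prompt" ;;
            http*)  echo "$KEYLOCATION -> bring network up, then zfs load-key" ;;
            file)   echo "$KEYLOCATION -> wait for ${KEYLOCATION#file://}" ;;
            *)      echo "$KEYLOCATION -> plain zfs load-key" ;;
        esac
    done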
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in
index 0d45f71eadce..bdc246943208 100644
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-rollback-bootfs.service.in
@@ -1,14 +1,14 @@
[Unit]
Description=Rollback bootfs just before it is mounted
Requisite=zfs-import.target
After=zfs-import.target zfs-snapshot-bootfs.service
Before=dracut-mount.service
DefaultDependencies=no
ConditionKernelCommandLine=bootfs.rollback
[Service]
# ${BOOTFS} should have been set by zfs-env-bootfs.service
Type=oneshot
ExecStartPre=/bin/sh -c 'test -n "${BOOTFS}"'
-ExecStart=/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.rollback)"; @sbindir@/zfs rollback -Rf "${BOOTFS}@${SNAPNAME:-%v}"'
+ExecStart=/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.rollback)"; exec @sbindir@/zfs rollback -Rf "${BOOTFS}@${SNAPNAME:-%v}"'
RemainAfterExit=yes
diff --git a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-snapshot-bootfs.service.in b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-snapshot-bootfs.service.in
index 11513ba27b01..6ea13850c3a7 100644
--- a/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-snapshot-bootfs.service.in
+++ b/sys/contrib/openzfs/contrib/dracut/90zfs/zfs-snapshot-bootfs.service.in
@@ -1,14 +1,14 @@
[Unit]
Description=Snapshot bootfs just before it is mounted
Requisite=zfs-import.target
After=zfs-import.target
Before=dracut-mount.service
DefaultDependencies=no
ConditionKernelCommandLine=bootfs.snapshot
[Service]
# ${BOOTFS} should have been set by zfs-env-bootfs.service
Type=oneshot
ExecStartPre=/bin/sh -c 'test -n "${BOOTFS}"'
-ExecStart=-/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.snapshot)"; @sbindir@/zfs snapshot "${BOOTFS}@${SNAPNAME:-%v}"'
+ExecStart=-/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.snapshot)"; exec @sbindir@/zfs snapshot "${BOOTFS}@${SNAPNAME:-%v}"'
RemainAfterExit=yes
diff --git a/sys/contrib/openzfs/contrib/initramfs/hooks/zfs.in b/sys/contrib/openzfs/contrib/initramfs/hooks/zfs.in
index 32331b264da8..546841e6c36a 100755
--- a/sys/contrib/openzfs/contrib/initramfs/hooks/zfs.in
+++ b/sys/contrib/openzfs/contrib/initramfs/hooks/zfs.in
@@ -1,50 +1,57 @@
#!/bin/sh
#
# Add OpenZFS filesystem capabilities to an initrd, usually for a native ZFS root.
#
if [ "$1" = "prereqs" ]; then
echo "udev"
exit
fi
. /usr/share/initramfs-tools/hook-functions
for req in "@sbindir@/zpool" "@sbindir@/zfs" "@mounthelperdir@/mount.zfs"; do
copy_exec "$req" || {
echo "$req not available!" >&2
exit 2
}
done
copy_exec "@sbindir@/zdb"
copy_exec "@udevdir@/vdev_id"
copy_exec "@udevdir@/zvol_id"
if command -v systemd-ask-password > /dev/null; then
copy_exec "$(command -v systemd-ask-password)"
fi
# We use pthreads, but i-t from buster doesn't automatically
# copy this indirect dependency: this can be removed when buster finally dies.
find /lib/ -type f -name "libgcc_s.so.[1-9]" | while read -r libgcc; do
copy_exec "$libgcc"
done
+# shellcheck disable=SC2050
+if [ @LIBFETCH_DYNAMIC@ -gt 0 ]; then
+ find /lib/ -name "@LIBFETCH_SONAME@" | while read -r libfetch; do
+ copy_exec "$libfetch"
+ done
+fi
+
copy_file config "/etc/hostid"
copy_file cache "@sysconfdir@/zfs/zpool.cache"
copy_file config "@initconfdir@/zfs"
copy_file config "@sysconfdir@/zfs/zfs-functions"
copy_file config "@sysconfdir@/zfs/vdev_id.conf"
copy_file rule "@udevruledir@/60-zvol.rules"
copy_file rule "@udevruledir@/69-vdev.rules"
manual_add_modules zfs
if [ -f "/etc/hostname" ]; then
copy_file config "/etc/hostname"
else
hostname="$(mktemp -t hostname.XXXXXXXXXX)"
hostname > "$hostname"
copy_file config "$hostname" "/etc/hostname"
rm -f "$hostname"
fi
diff --git a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
index 82eceaedb56e..814547b6fa0c 100644
--- a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
+++ b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
@@ -1,997 +1,993 @@
# ZFS boot stub for initramfs-tools.
#
# In the initramfs environment, the /init script sources this stub to
# override the default functions in the /scripts/local script.
#
# Enable this by passing boot=zfs on the kernel command line.
#
# $quiet, $root, $rpool, $bootfs come from the cmdline:
# shellcheck disable=SC2154
# Source the common functions
. /etc/zfs/zfs-functions
# Start interactive shell.
# Use Debian's panic() if defined, because it allows preventing shell access
# by setting panic in cmdline (e.g. panic=0 or panic=15).
# See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual:
# https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html
shell() {
if command -v panic > /dev/null 2>&1; then
panic
else
/bin/sh
fi
}
# This runs any scripts that should run before we start importing
# pools and mounting any filesystems.
pre_mountroot()
{
if command -v run_scripts > /dev/null 2>&1
then
if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-top"
run_scripts /scripts/local-top
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-premount"
run_scripts /scripts/local-premount
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
fi
}
# If plymouth is available, hide the splash image.
disable_plymouth()
{
if [ -x /bin/plymouth ] && /bin/plymouth --ping
then
/bin/plymouth hide-splash >/dev/null 2>&1
fi
}
# Get a ZFS filesystem property value.
get_fs_value()
{
fs="$1"
value=$2
"${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null
}
# Find the 'bootfs' property on pool $1.
# If the property does not contain '/', then ignore this
# pool by exporting it again.
find_rootfs()
{
pool="$1"
# If 'POOL_IMPORTED' isn't set, no pool imported and therefore
# we won't be able to find a root fs.
[ -z "${POOL_IMPORTED}" ] && return 1
# If it's already specified, just keep it mounted and exit
# User (kernel command line) must be correct.
[ -n "${ZFS_BOOTFS}" ] && return 0
# Not set, try to find it in the 'bootfs' property of the pool.
# NOTE: zpool does not support 'get -H -ovalue bootfs'...
ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool")
# Make sure it's not '-' and that it starts with /.
if [ "${ZFS_BOOTFS}" != "-" ] && \
get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$'
then
# Keep it mounted
POOL_IMPORTED=1
return 0
fi
# Not the boot fs here; export it and try again later.
"${ZPOOL}" export "$pool"
POOL_IMPORTED=
ZFS_BOOTFS=
return 1
}
# Support function to get a list of all pools, separated with ';'
find_pools()
{
pools=$("$@" 2> /dev/null | \
- grep -E "pool:|^[a-zA-Z0-9]" | \
- sed 's@.*: @@' | \
+ sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
tr '\n' ';')
echo "${pools%%;}" # Return without the last ';'.
}
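The single sed -Ee '/.../!d' -e 's@.*: @@' replaces the old grep-then-sed pipeline: the first expression deletes lines matching neither pattern, the second strips the "pool: " style prefix. A stand-alone sketch with fabricated zpool import output:
    printf '   pool: tank\n     id: 123\nrpool\n' | \
        sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | tr '\n' ';'
    # prints: tank;rpool;  (the caller then trims the trailing ';')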
# Get a list of all available pools
get_pools()
{
if [ -n "${ZFS_POOL_IMPORT}" ]; then
echo "$ZFS_POOL_IMPORT"
return 0
fi
# Get the base list of available pools.
available_pools=$(find_pools "$ZPOOL" import)
# Just in case - seen it happen (that a pool isn't visible/found
# with a simple "zpool import" but only when using the "-d"
# option or setting ZPOOL_IMPORT_PATH).
if [ -d "/dev/disk/by-id" ]
then
npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
if [ -n "$npools" ]
then
# Because we have found extra pool(s) here, which weren't
# found 'normally', we need to force USE_DISK_BY_ID to
# make sure we're able to actually import it/them later.
USE_DISK_BY_ID='yes'
if [ -n "$available_pools" ]
then
# Filter out duplicates (pools found with the simple
# "zpool import" but which is also found with the
# "zpool import -d ...").
npools=$(echo "$npools" | sed "s,$available_pools,,")
# Add the list to the existing list of
# available pools
available_pools="$available_pools;$npools"
else
available_pools="$npools"
fi
fi
fi
# Filter out any exceptions...
if [ -n "$ZFS_POOL_EXCEPTIONS" ]
then
found=""
apools=""
OLD_IFS="$IFS" ; IFS=";"
for pool in $available_pools
do
for exception in $ZFS_POOL_EXCEPTIONS
do
[ "$pool" = "$exception" ] && continue 2
found="$pool"
done
if [ -n "$found" ]
then
if [ -n "$apools" ]
then
apools="$apools;$pool"
else
apools="$pool"
fi
fi
done
IFS="$OLD_IFS"
available_pools="$apools"
fi
# Return list of available pools.
echo "$available_pools"
}
# Import given pool $1
import_pool()
{
pool="$1"
# Verify that the pool isn't already imported
# Make as sure as we can that '-f' will not be required to import.
"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0
# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
# to something we can use later with the real import(s). We want to
# make sure we find all by* dirs, BUT by-vdev should be first (if it
# exists).
if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
then
dirs="$(for dir in /dev/disk/by-*
do
# Ignore by-vdev here - we want it first!
echo "$dir" | grep -q /by-vdev && continue
[ ! -d "$dir" ] && continue
printf "%s" "$dir:"
done | sed 's,:$,,g')"
if [ -d "/dev/disk/by-vdev" ]
then
# Add by-vdev at the beginning.
ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
fi
# ... and /dev at the very end, just for good measure.
ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
fi
# Needs to be exported for "zpool" to catch it.
[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
[ "$quiet" != "y" ] && zfs_log_begin_msg \
"Importing pool '${pool}' using defaults"
ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
if [ -f "${ZPOOL_CACHE}" ]
then
[ "$quiet" != "y" ] && zfs_log_begin_msg \
"Importing pool '${pool}' using cachefile."
ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}"
ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)"
ZFS_ERROR="$?"
fi
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: ${ZFS_CMD} '$pool'"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to import pool '$pool'."
echo "Manually import the pool and exit."
shell
fi
fi
[ "$quiet" != "y" ] && zfs_log_end_msg
POOL_IMPORTED=1
return 0
}
# Load ZFS modules
# Loading a module in an initrd requires a slightly different approach,
# with more logging etc.
load_module_initrd()
{
[ -n "$ROOTDELAY" ] && ZFS_INITRD_PRE_MOUNTROOT_SLEEP="$ROOTDELAY"
if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null
then
if [ "$quiet" != "y" ]; then
zfs_log_begin_msg "Sleeping for" \
"$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..."
fi
sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP"
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
# Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear.
if command -v wait_for_udev > /dev/null 2>&1 ; then
wait_for_udev 10
elif command -v wait_for_dev > /dev/null 2>&1 ; then
wait_for_dev
fi
# zpool import refuses to import without a valid /proc/self/mounts
[ ! -f /proc/self/mounts ] && mount proc /proc
# Load the module
load_module "zfs" || return 1
if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null
then
if [ "$quiet" != "y" ]; then
zfs_log_begin_msg "Sleeping for" \
"$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..."
fi
sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP"
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Mount a given filesystem
mount_fs()
{
fs="$1"
# Check that the filesystem exists
"${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1
# Skip filesystems with canmount=off. The root fs should not have
# canmount=off, but ignore it for backwards compatibility just in case.
if [ "$fs" != "${ZFS_BOOTFS}" ]
then
canmount=$(get_fs_value "$fs" canmount)
[ "$canmount" = "off" ] && return 0
fi
# Need the _original_ datasets mountpoint!
mountpoint=$(get_fs_value "$fs" mountpoint)
ZFS_CMD="mount -o zfsutil -t zfs"
if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then
# Can't use the mountpoint property. Might be one of our
# clones. Check the 'org.zol:mountpoint' property set in
# clone_snap() if that's usable.
mountpoint=$(get_fs_value "$fs" org.zol:mountpoint)
if [ "$mountpoint" = "legacy" ] ||
[ "$mountpoint" = "none" ] ||
[ "$mountpoint" = "-" ]
then
if [ "$fs" != "${ZFS_BOOTFS}" ]; then
# We don't have a proper mountpoint and this
# isn't the root fs.
return 0
else
# Last hail-mary: Hope 'rootmnt' is set!
mountpoint=""
fi
fi
# If it's not a legacy filesystem, it can only be a
# native one...
if [ "$mountpoint" = "legacy" ]; then
ZFS_CMD="mount -t zfs"
fi
fi
# Possibly decrypt a filesystem using native encryption.
decrypt_fs "$fs"
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'"
[ -n "${ZFS_DEBUG}" ] && \
zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'"
ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1)
ZFS_ERROR=$?
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}."
echo "Manually mount the filesystem and exit."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Unlock a ZFS native encrypted filesystem.
decrypt_fs()
{
fs="$1"
# If pool encryption is active and the zfs command understands '-o encryption'
if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then
# Determine dataset that holds key for root dataset
ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)"
KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)"
echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name
# If root dataset is encrypted...
if ! [ "${ENCRYPTIONROOT}" = "-" ]; then
KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
# Continue only if the key needs to be loaded
[ "$KEYSTATUS" = "unavailable" ] || return 0
- TRY_COUNT=3
- # If key is stored in a file, do not prompt
+ # Do not prompt if the key is stored noninteractively.
if ! [ "${KEYLOCATION}" = "prompt" ]; then
$ZFS load-key "${ENCRYPTIONROOT}"
# Prompt with plymouth, if active
- elif [ -e /bin/plymouth ] && /bin/plymouth --ping 2>/dev/null; then
+ elif /bin/plymouth --ping 2>/dev/null; then
echo "plymouth" > /run/zfs_console_askpwd_cmd
- while [ $TRY_COUNT -gt 0 ]; do
+ for _ in 1 2 3; do
plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
$ZFS load-key "${ENCRYPTIONROOT}" && break
- TRY_COUNT=$((TRY_COUNT - 1))
done
# Prompt with systemd, if active
elif [ -e /run/systemd/system ]; then
echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd
- while [ $TRY_COUNT -gt 0 ]; do
- systemd-ask-password "Encrypted ZFS password for ${ENCRYPTIONROOT}" --no-tty | \
+ for _ in 1 2 3; do
+ systemd-ask-password --no-tty "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \
$ZFS load-key "${ENCRYPTIONROOT}" && break
- TRY_COUNT=$((TRY_COUNT - 1))
done
# Prompt with ZFS tty, otherwise
else
# Temporarily setting "printk" to "7" allows the prompt to appear even when the "quiet" kernel option has been used
echo "load-key" > /run/zfs_console_askpwd_cmd
- storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)"
+ read -r storeprintk _ < /proc/sys/kernel/printk
echo 7 > /proc/sys/kernel/printk
$ZFS load-key "${ENCRYPTIONROOT}"
echo "$storeprintk" > /proc/sys/kernel/printk
fi
fi
fi
return 0
}
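The password retry loops were switched from a TRY_COUNT counter to a fixed for _ in 1 2 3, which keeps the same three attempts with no counter bookkeeping; a minimal sketch with a stand-in for the ask-password | zfs load-key pipeline:
    try_unlock() { [ -e /tmp/unlocked ]; }   # hypothetical stand-in for the real pipeline
    for _ in 1 2 3; do
        try_unlock && break
        echo "unlock failed, retrying..."
    done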
# Destroy a given filesystem.
destroy_fs()
{
fs="$1"
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Destroying '$fs'"
ZFS_CMD="${ZFS} destroy $fs"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available."
echo "Hint: Try: zfs destroy -Rfn $fs"
echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Clone snapshot $1 to destination filesystem $2
# Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep
# manual control over its mounting (i.e., make sure it's not automatically
# mounted with a 'zfs mount -a' in the init/systemd scripts).
clone_snap()
{
snap="$1"
destfs="$2"
mountpoint="$3"
[ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'"
# Clone the snapshot into a dataset we can boot from
# + We don't want this filesystem to be automatically mounted, we
# want control over this here and nowhere else.
# + We don't need any mountpoint set for the same reason.
# We use the 'org.zol:mountpoint' property to remember the mountpoint.
ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none"
ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}"
ZFS_CMD="${ZFS_CMD} $snap $destfs"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to clone snapshot."
echo "Make sure that the any problems are corrected and then make sure"
echo "that the dataset '$destfs' exists and is bootable."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Rollback a given snapshot.
rollback_snap()
{
snap="$1"
[ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap"
ZFS_CMD="${ZFS} rollback -Rf $snap"
ZFS_STDERR="$(${ZFS_CMD} 2>&1)"
ZFS_ERROR="$?"
if [ "${ZFS_ERROR}" != 0 ]
then
[ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}"
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "Failed to rollback snapshot."
shell
else
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
return 0
}
# Get a list of snapshots, give them as a numbered list
# to the user to choose from.
ask_user_snap()
{
fs="$1"
# We need to temporarily disable debugging. Set 'debug' so we
# remember to enable it again.
if [ -n "${ZFS_DEBUG}" ]; then
unset ZFS_DEBUG
set +x
debug=1
fi
# Because we need the resulting snapshot, which is sent on
# stdout to the caller, we use stderr for our questions.
echo "What snapshot do you want to boot from?" > /dev/stderr
# shellcheck disable=SC2046
IFS="
" set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}")
i=1
for snap in "$@"; do
echo " $i: $snap"
i=$((i + 1))
done > /dev/stderr
# expr instead of test here because [ a -lt 0 ] errors out,
# but expr falls back to lexicographical, which works out right
snapnr=0
while expr "$snapnr" "<" 1 > /dev/null ||
expr "$snapnr" ">" "$#" > /dev/null
do
printf "%s" "Snap nr [1-$#]? " > /dev/stderr
read -r snapnr
done
# Re-enable debugging.
if [ -n "${debug}" ]; then
ZFS_DEBUG=1
set -x
fi
eval echo '$'"$snapnr"
}
setup_snapshot_booting()
{
snap="$1"
retval=0
# Make sure that the snapshot specified actually exists.
if [ ! "$(get_fs_value "${snap}" type)" ]
then
# Snapshot does not exist (...@<null> ?)
# ask the user for a snapshot to use.
snap="$(ask_user_snap "${snap%%@*}")"
fi
# Separate the full snapshot ('$snap') into its filesystem and
# snapshot names. A split() function would have been nice here...
rootfs="${snap%%@*}"
snapname="${snap##*@}"
ZFS_BOOTFS="${rootfs}_${snapname}"
if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
then
# If the destination dataset for the clone
# already exists, destroy it. Recursively
if [ "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then
filesystems=$("${ZFS}" list -oname -tfilesystem -H \
-r -Sname "${ZFS_BOOTFS}")
for fs in $filesystems; do
destroy_fs "${fs}"
done
fi
fi
# Get all snapshots, recursively (might need to clone /usr, /var etc
# as well).
for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \
grep "${snapname}")
do
if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline
then
# Rollback snapshot
rollback_snap "$s" || retval=$((retval + 1))
else
# Setup a destination filesystem name.
# Ex: Called with 'rpool/ROOT/debian@snap2'
# rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
# rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot
# rpool/ROOT/debian/usr@snap2 => rpool/ROOT/debian_snap2/usr
# rpool/ROOT/debian/var@snap2 => rpool/ROOT/debian_snap2/var
subfs="${s##$rootfs}"
subfs="${subfs%%@$snapname}"
destfs="${rootfs}_${snapname}" # base fs.
[ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs.
# Get the mountpoint of the filesystem, to be used
# with clone_snap(). If legacy or none, then use
# the sub fs value.
mountpoint=$(get_fs_value "${s%%@*}" mountpoint)
if [ "$mountpoint" = "legacy" ] || \
[ "$mountpoint" = "none" ]
then
if [ -n "${subfs}" ]; then
mountpoint="${subfs}"
else
mountpoint="/"
fi
fi
# Clone the snapshot into its own
# filesystem
clone_snap "$s" "${destfs}" "${mountpoint}" || \
retval=$((retval + 1))
fi
done
# If we haven't returned yet, we have a problem...
return "${retval}"
}
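The subfs/destfs bookkeeping above maps each recursive snapshot onto a clone under '<rootfs>_<snapname>'; a stand-alone sketch of just that name mapping, with invented dataset names:
    rootfs="rpool/ROOT/debian"; snapname="snap2"
    for s in "rpool/ROOT/debian@snap2" "rpool/ROOT/debian/usr@snap2"; do
        subfs="${s##$rootfs}"; subfs="${subfs%%@$snapname}"
        echo "$s -> ${rootfs}_${snapname}${subfs}"
    done
    # prints: rpool/ROOT/debian@snap2 -> rpool/ROOT/debian_snap2
    #         rpool/ROOT/debian/usr@snap2 -> rpool/ROOT/debian_snap2/usr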
# ================================================================
# This is the main function.
mountroot()
{
# ----------------------------------------------------------------
# I N I T I A L S E T U P
# ------------
# Run the pre-mount scripts from /scripts/local-top.
pre_mountroot
# ------------
# Source the default setup variables.
[ -r '/etc/default/zfs' ] && . /etc/default/zfs
# ------------
# Support debug option
if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline
then
ZFS_DEBUG=1
mkdir /var/log
#exec 2> /var/log/boot.debug
set -x
fi
# ------------
# Load ZFS module etc.
if ! load_module_initrd; then
disable_plymouth
echo ""
echo "Failed to load ZFS modules."
echo "Manually load the modules and exit."
shell
fi
# ------------
# Look for the cache file (if any).
[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
[ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE
# ------------
# Compatibility: 'ROOT' is for Debian GNU/Linux (etc),
# 'root' is for Redhat/Fedora (etc),
# 'REAL_ROOT' is for Gentoo
if [ -z "$ROOT" ]
then
[ -n "$root" ] && ROOT=${root}
[ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT}
fi
# ------------
# Where to mount the root fs in the initrd - set outside this script
# Compatibility: 'rootmnt' is for Debian GNU/Linux (etc),
# 'NEWROOT' is for RedHat/Fedora (etc),
# 'NEW_ROOT' is for Gentoo
if [ -z "$rootmnt" ]
then
[ -n "$NEWROOT" ] && rootmnt=${NEWROOT}
[ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT}
fi
# ------------
# No longer set in the defaults file, but it could have been set in
# get_pools() in some circumstances. If it's something, but not 'yes',
# it's no good to us.
[ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \
unset USE_DISK_BY_ID
# ----------------------------------------------------------------
# P A R S E C O M M A N D L I N E O P T I O N S
# This part is the really ugly part - there are so many options and permutations
# 'out there', and if we should make this the 'primary' source for ZFS initrd
# scripting, we need/should support them all.
#
# Supports the following kernel command line argument combinations
# (in this order - first match wins):
#
# rpool=<pool> (tries to finds bootfs automatically)
# bootfs=<pool>/<dataset> (uses this for rpool - first part)
# rpool=<pool> bootfs=<pool>/<dataset>
# -B zfs-bootfs=<pool>/<fs> (uses this for rpool - first part)
# rpool=rpool (default if none of the above is used)
# root=<pool>/<dataset> (uses this for rpool - first part)
# root=ZFS=<pool>/<dataset> (uses this for rpool - first part, without 'ZFS=')
# root=zfs:AUTO (tries to detect both pool and rootfs)
# root=zfs:<pool>/<dataset> (uses this for rpool - first part, without 'zfs:')
#
# Option <dataset> could also be <snapshot>
# Option <pool> could also be <guid>
# ------------
# Support force option
# In addition, setting one of zfs_force, zfs.force or zfsforce to
# 'yes', 'on' or '1' will make sure we force import the pool.
# This should (almost) never be needed, but it's here for
# completeness.
ZPOOL_FORCE=""
if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline
then
ZPOOL_FORCE="-f"
fi
# ------------
# Look for 'rpool' and 'bootfs' parameter
[ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}"
[ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}"
# ------------
# If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use
# 'ROOT'
[ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT"
# ------------
# Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter.
# NOTE: Only use the pool name and dataset. The rest is not
# supported by OpenZFS (whatever it's for).
if [ -z "$ZFS_RPOOL" ]
then
# The ${zfs-bootfs} variable is set at the kernel command
# line, usually by GRUB, but it cannot be referenced here
# directly because bourne variable names cannot contain a
# hyphen.
#
# Reassign the variable by dumping the environment and
# stripping the zfs-bootfs= prefix. Let the shell handle
# quoting through the eval command:
# shellcheck disable=SC2046
eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p')
fi
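Because a hyphen is not valid in a shell variable name, the script recovers zfs-bootfs by dumping all variables with set and letting sed strip the prefix; a sketch of just the extraction step, with an invented value in place of the set output:
    echo "zfs-bootfs='rpool/ROOT/debian'" | sed -n -e 's,^zfs-bootfs=,,p'
    # prints: 'rpool/ROOT/debian' (the eval in the script then removes the quoting)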
# ------------
# No root fs or pool specified - do auto detect.
if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ]
then
# Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO'
# which will be caught later
ROOT='zfs:AUTO'
fi
# ----------------------------------------------------------------
# F I N D A N D I M P O R T C O R R E C T P O O L
# ------------
if [ "$ROOT" = "zfs:AUTO" ]
then
# Try to detect both pool and root fs.
# If we got here, that means we don't have a hint as to
# the root dataset, but with root=zfs:AUTO on cmdline,
# this says "zfs:AUTO" here and interferes with checks later
ZFS_BOOTFS=
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Attempting to import additional pools."
# Get a list of pools available for import
if [ -n "$ZFS_RPOOL" ]
then
# We've specified a pool - check only that
POOLS=$ZFS_RPOOL
else
POOLS=$(get_pools)
fi
OLD_IFS="$IFS" ; IFS=";"
for pool in $POOLS
do
[ -z "$pool" ] && continue
IFS="$OLD_IFS" import_pool "$pool"
IFS="$OLD_IFS" find_rootfs "$pool" && break
done
IFS="$OLD_IFS"
[ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR
else
# No auto - use value from the command line option.
# Strip 'zfs:' and 'ZFS='.
ZFS_BOOTFS="${ROOT#*[:=]}"
# Strip everything after the first slash.
ZFS_RPOOL="${ZFS_BOOTFS%%/*}"
fi
# Import the pool (if not already done so in the AUTO check above).
if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'"
import_pool "${ZFS_RPOOL}"
find_rootfs "${ZFS_RPOOL}"
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
if [ -z "${POOL_IMPORTED}" ]
then
# No pool imported, this is serious!
disable_plymouth
echo ""
echo "Command: $ZFS_CMD"
echo "Message: $ZFS_STDERR"
echo "Error: $ZFS_ERROR"
echo ""
echo "No pool imported. Manually import the root pool"
echo "at the command prompt and then exit."
echo "Hint: Try: zpool import -N ${ZFS_RPOOL}"
shell
fi
# In case the pool was specified as guid, resolve guid to name
pool="$("${ZPOOL}" get name,guid -o name,value -H | \
awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')"
if [ -n "$pool" ]; then
# If $ZFS_BOOTFS contains guid, replace the guid portion with $pool
ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \
sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g")
ZFS_RPOOL="${pool}"
fi
# ----------------------------------------------------------------
# P R E P A R E R O O T F I L E S Y S T E M
if [ -n "${ZFS_BOOTFS}" ]
then
# Booting from a snapshot?
# Will overwrite the ZFS_BOOTFS variable like so:
# rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2
echo "${ZFS_BOOTFS}" | grep -q '@' && \
setup_snapshot_booting "${ZFS_BOOTFS}"
fi
if [ -z "${ZFS_BOOTFS}" ]
then
# Still nothing! Let the user sort this out.
disable_plymouth
echo ""
echo "Error: Unknown root filesystem - no 'bootfs' pool property and"
echo " not specified on the kernel command line."
echo ""
echo "Manually mount the root filesystem on $rootmnt and then exit."
echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt"
shell
fi
# ----------------------------------------------------------------
# M O U N T F I L E S Y S T E M S
# * Ideally, the root filesystem would be mounted like this:
#
# zpool import -R "$rootmnt" -N "$ZFS_RPOOL"
# zfs mount -o mountpoint=/ "${ZFS_BOOTFS}"
#
# but the MOUNTPOINT prefix is preserved on descendant filesystems
# after the pivot into the regular root, which later breaks things
# like `zfs mount -a` and the /proc/self/mounts refresh.
#
# * Mount additional filesystems required
# Such as /usr, /var, /usr/local etc.
# NOTE: Mounted in the order specified in the
# ZFS_INITRD_ADDITIONAL_DATASETS variable so take care!
# Go through the complete list (recursively) of all filesystems below
# the real root dataset
filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")"
OLD_IFS="$IFS" ; IFS="
"
for fs in $filesystems; do
IFS="$OLD_IFS" mount_fs "$fs"
done
IFS="$OLD_IFS"
for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do
mount_fs "$fs"
done
touch /run/zfs_unlock_complete
if [ -e /run/zfs_unlock_complete_notify ]; then
read -r < /run/zfs_unlock_complete_notify
fi
# ------------
# Debugging information
if [ -n "${ZFS_DEBUG}" ]
then
#exec 2>&1-
echo "DEBUG: imported pools:"
"${ZPOOL}" list -H
echo
echo "DEBUG: mounted ZFS filesystems:"
mount | grep zfs
echo
echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. "
printf "%s" " 'c' for shell, 'r' for reboot, 'ENTER' to continue. "
read -r b
[ "$b" = "c" ] && /bin/sh
[ "$b" = "r" ] && reboot -f
set +x
fi
# ------------
# Run local bottom script
if command -v run_scripts > /dev/null 2>&1
then
if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ]
then
[ "$quiet" != "y" ] && \
zfs_log_begin_msg "Running /scripts/local-bottom"
run_scripts /scripts/local-bottom
[ "$quiet" != "y" ] && zfs_log_end_msg
fi
fi
}
diff --git a/sys/contrib/openzfs/etc/default/zfs.in b/sys/contrib/openzfs/etc/default/zfs.in
index 3b6e5486dd33..77cc604d8f4f 100644
--- a/sys/contrib/openzfs/etc/default/zfs.in
+++ b/sys/contrib/openzfs/etc/default/zfs.in
@@ -1,103 +1,109 @@
-# ZoL userland configuration.
+# OpenZFS userland configuration.
# NOTE: This file is intended for sysv init and initramfs.
# Changing some of these settings may not make any difference on
# systemd-based setups; e.g., setting ZFS_MOUNT=no will not prevent systemd
# from launching zfs-mount.service during boot.
# See: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=901436
# To enable a boolean setting, set it to yes, on, true, or 1.
# Anything else will be interpreted as unset.
+# Run `zfs load-key` during system start?
+ZFS_LOAD_KEY='yes'
+
+# Run `zfs unload-key` during system stop?
+ZFS_UNLOAD_KEY='no'
+
# Run `zfs mount -a` during system start?
ZFS_MOUNT='yes'
# Run `zfs unmount -a` during system stop?
ZFS_UNMOUNT='yes'
# Run `zfs share -a` during system start?
# nb: This acts on the shareiscsi, sharenfs, and sharesmb dataset properties.
ZFS_SHARE='yes'
# Run `zfs unshare -a` during system stop?
ZFS_UNSHARE='yes'
# By default, a verbatim import of all pools is performed at boot based on the
# contents of the default zpool cache file. The contents of the cache are
# managed automatically by the 'zpool import' and 'zpool export' commands.
#
# By setting this to 'yes', the system will instead search all devices for
# pools and attempt to import them all at boot, even those that have been
# exported. Under this mode, the search path can be controlled by the
# ZPOOL_IMPORT_PATH variable and a list of pools that should not be imported
# can be listed in the ZFS_POOL_EXCEPTIONS variable.
#
# Note that importing all visible pools may include pools that you don't
# expect, such as those on removable devices and SANs, and those pools may
# proceed to mount themselves in places you do not want them to. The results
# can be unpredictable and possibly dangerous. Only enable this option if you
# understand this risk and have complete physical control over your system and
# SAN to prevent the insertion of malicious pools.
ZPOOL_IMPORT_ALL_VISIBLE='no'
# Specify specific path(s) to look for device nodes and/or links for the
# pool import(s). See zpool(8) for more information about this variable.
# It supersedes the old USE_DISK_BY_ID which indicated that it would only
# try '/dev/disk/by-id'.
# The old variable will still work in the code, but is deprecated.
#ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id"
# List of pools that should NOT be imported at boot
# when ZPOOL_IMPORT_ALL_VISIBLE is 'yes'.
# This is a space separated list.
#ZFS_POOL_EXCEPTIONS="test2"
# Should the datasets be mounted verbosely?
# A mount counter will be used when mounting if set to 'yes'.
VERBOSE_MOUNT='no'
# Should we allow overlay mounts?
# This is standard in Linux, but not in ZFS, which comes from Solaris (where
# this is not allowed).
DO_OVERLAY_MOUNTS='no'
# Any additional option to the 'zpool import' command line?
# Include '-o' for each option wanted.
# You don't need to put '-f' in here, unless you want it ALL the time.
# Using the option 'zfsforce=1' on the grub/kernel command line will
# do the same, but on a case-by-case basis.
ZPOOL_IMPORT_OPTS=""
# Full path to the ZFS cache file?
# See "cachefile" in zpool(8).
# The default is "@sysconfdir@/zfs/zpool.cache".
#ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
#
# Setting ZPOOL_CACHE to an empty string ('') AND setting ZPOOL_IMPORT_OPTS to
# "-c @sysconfdir@/zfs/zpool.cache" will _enforce_ the use of a cache file.
# This is needed in some cases (extreme amounts of VDEVs, multipath etc).
# The use of a cache file is generally not recommended on Linux
# because it is sometimes more trouble than it's worth (laptops with external
# devices, or when/if device nodes change names).
#ZPOOL_IMPORT_OPTS="-c @sysconfdir@/zfs/zpool.cache"
#ZPOOL_CACHE=""
# Any additional option to the 'zfs mount' command line?
# Include '-o' for each option wanted.
MOUNT_EXTRA_OPTIONS=""
# Build kernel modules with the --enable-debug switch?
# Only applicable for Debian GNU/Linux {dkms,initramfs}.
ZFS_DKMS_ENABLE_DEBUG='no'
# Build kernel modules with the --enable-debuginfo switch?
# Only applicable for Debian GNU/Linux {dkms,initramfs}.
ZFS_DKMS_ENABLE_DEBUGINFO='no'
# Keep debugging symbols in kernel modules?
# Only applicable for Debian GNU/Linux {dkms,initramfs}.
ZFS_DKMS_DISABLE_STRIP='no'
# Optional arguments for the ZFS Event Daemon (ZED).
# See zed(8) for more information on available options.
#ZED_ARGS="-M"
diff --git a/sys/contrib/openzfs/etc/init.d/.gitignore b/sys/contrib/openzfs/etc/init.d/.gitignore
index 43a673d55343..b3402f831806 100644
--- a/sys/contrib/openzfs/etc/init.d/.gitignore
+++ b/sys/contrib/openzfs/etc/init.d/.gitignore
@@ -1,5 +1,6 @@
zfs-import
+zfs-load-key
zfs-mount
zfs-share
zfs-zed
zfs
diff --git a/sys/contrib/openzfs/etc/init.d/Makefile.am b/sys/contrib/openzfs/etc/init.d/Makefile.am
index f93af1fd77e0..658623fda4aa 100644
--- a/sys/contrib/openzfs/etc/init.d/Makefile.am
+++ b/sys/contrib/openzfs/etc/init.d/Makefile.am
@@ -1,10 +1,10 @@
include $(top_srcdir)/config/Substfiles.am
include $(top_srcdir)/config/Shellcheck.am
EXTRA_DIST += README.md
-init_SCRIPTS = zfs-import zfs-mount zfs-share zfs-zed
+init_SCRIPTS = zfs-import zfs-load-key zfs-mount zfs-share zfs-zed
SUBSTFILES += $(init_SCRIPTS)
SHELLCHECK_SHELL = dash # local variables
diff --git a/sys/contrib/openzfs/etc/init.d/README.md b/sys/contrib/openzfs/etc/init.d/README.md
index c14b01937db2..f417b24c5923 100644
--- a/sys/contrib/openzfs/etc/init.d/README.md
+++ b/sys/contrib/openzfs/etc/init.d/README.md
@@ -1,72 +1,75 @@
DESCRIPTION
These scripts were written with the primary intention of being portable and
usable on as many systems as possible.
This is, in practice, usually not possible. But the intention is there.
And it is a good one.
They have been tested successfully on:
* Debian GNU/Linux Wheezy
* Debian GNU/Linux Jessie
* Ubuntu Trusty
* CentOS 6.0
* CentOS 6.6
* Gentoo
SUPPORT
If you find that they don't work for your platform, please report this
at the OpenZFS issue tracker at https://github.com/openzfs/zfs/issues.
Please include:
* Distribution name
* Distribution version
* Where to find an install CD image
* Architecture
If you have code to share that fixes the problem, that is much better.
But please remember to try your best to keep portability in mind. If you
suspect that what you're writing/modifying won't work on anything other
than your distribution, please make sure to put that code in an appropriate
if/else/fi block.
It currently MUST be bash (or fully compatible) for this to work.
If you're making your own distribution and you want the scripts to
work on that, the biggest problem you'll (probably) have is the part
at the beginning of the "zfs-functions" file which sets up the
logging output.
INSTALLING INIT SCRIPT LINKS
To set up the init script links in /etc/rc?.d manually on a Debian GNU/Linux
(or derived) system, run the following commands (the order is important!):
- update-rc.d zfs-import start 07 S . stop 07 0 1 6 .
- update-rc.d zfs-mount start 02 2 3 4 5 . stop 06 0 1 6 .
- update-rc.d zfs-zed start 07 2 3 4 5 . stop 08 0 1 6 .
- update-rc.d zfs-share start 27 2 3 4 5 . stop 05 0 1 6 .
+ update-rc.d zfs-import start 07 S . stop 07 0 1 6 .
+ update-rc.d zfs-load-key start 02 2 3 4 5 . stop 06 0 1 6 .
+ update-rc.d zfs-mount start 02 2 3 4 5 . stop 06 0 1 6 .
+ update-rc.d zfs-zed start 07 2 3 4 5 . stop 08 0 1 6 .
+ update-rc.d zfs-share start 27 2 3 4 5 . stop 05 0 1 6 .
To do the same on RedHat, Fedora and/or CentOS:
chkconfig zfs-import
+ chkconfig zfs-load-key
chkconfig zfs-mount
chkconfig zfs-zed
chkconfig zfs-share
On Gentoo:
rc-update add zfs-import boot
+ rc-update add zfs-load-key boot
rc-update add zfs-mount boot
rc-update add zfs-zed default
rc-update add zfs-share default
The idea here is to make sure all of the ZFS filesystems, including possibly
separate datasets like /var, are mounted before anything else is started.
Then, ZED, which depends on /var, can be started. It will consume and act
on events that occurred before it started. ZED may also play a role in
sharing filesystems in the future, so it is important that it start before
the 'share' service.
Finally, we share filesystems configured with the share\* property.
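As a quick, hedged way to sanity-check the resulting ordering on a Debian-style system, the generated rc links can be listed after running the update-rc.d commands above; the exact sequence numbers depend on those invocations.
# Show the ZFS start/stop links created by update-rc.d (sysvinit layout).
# import should appear in rcS.d; load-key, mount, zed and share in rc2.d,
# with load-key sorted before mount and zed before share.
ls -1 /etc/rcS.d /etc/rc2.d 2>/dev/null | grep -i zfs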
diff --git a/sys/contrib/openzfs/etc/init.d/zfs-import.in b/sys/contrib/openzfs/etc/init.d/zfs-import.in
index e4bc7b8339fc..130174f74d06 100755
--- a/sys/contrib/openzfs/etc/init.d/zfs-import.in
+++ b/sys/contrib/openzfs/etc/init.d/zfs-import.in
@@ -1,338 +1,337 @@
#!@DEFAULT_INIT_SHELL@
#
# zfs-import This script will import ZFS pools
#
# chkconfig: 2345 01 99
# description: This script will perform a verbatim import of ZFS pools
# during system boot.
# probe: true
#
### BEGIN INIT INFO
# Provides: zfs-import
# Required-Start: mtab
# Required-Stop: $local_fs mtab
# Default-Start: S
# Default-Stop: 0 1 6
# X-Start-Before: checkfs
# X-Stop-After: zfs-mount
# Short-Description: Import ZFS pools
# Description: Run the `zpool import` command.
### END INIT INFO
#
# NOTE: Not having '$local_fs' on Required-Start but only on Required-Stop
# is on purpose. If we have '$local_fs' in both (and X-Start-Before=checkfs)
# we get conflicts - import needs to be started extremely early,
# but not stopped too late.
#
# Released under the 2-clause BSD license.
#
# This script is based on debian/zfsutils.zfs.init from the
# Debian GNU/kFreeBSD zfsutils 8.1-3 package, written by Aurelien Jarno.
# Source the common init script
. @sysconfdir@/zfs/zfs-functions
# ----------------------------------------------------
do_depend()
{
before swap
after sysfs udev
keyword -lxc -openvz -prefix -vserver
}
# Use the zpool cache file to import pools
do_verbatim_import()
{
if [ -f "$ZPOOL_CACHE" ]
then
zfs_action "Importing ZFS pool(s)" \
"$ZPOOL" import -c "$ZPOOL_CACHE" -N -a
fi
}
# Support function to get a list of all pools, separated with ';'
find_pools()
{
local pools
pools=$("$@" 2> /dev/null | \
- grep -E "pool:|^[a-zA-Z0-9]" | \
- sed 's@.*: @@' | \
+ sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \
sort | \
tr '\n' ';')
echo "${pools%%;}" # Return without the last ';'.
}
# Find and import all visible pools, even exported ones
do_import_all_visible()
{
local already_imported available_pools pool npools
local exception dir ZPOOL_IMPORT_PATH RET=0 r=1
# In case not shutdown cleanly.
# shellcheck disable=SC2154
[ -n "$init" ] && rm -f /etc/dfs/sharetab
# Just simplify code later on.
if [ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ]
then
# It's something, but not 'yes' so it's no good to us.
unset USE_DISK_BY_ID
fi
# Find list of already imported pools.
already_imported=$(find_pools "$ZPOOL" list -H -oname)
available_pools=$(find_pools "$ZPOOL" import)
# Just in case - seen it happen (that a pool isn't visible/found
# with a simple "zpool import" but only when using the "-d"
# option or setting ZPOOL_IMPORT_PATH).
if [ -d "/dev/disk/by-id" ]
then
npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id)
if [ -n "$npools" ]
then
# Because we have found extra pool(s) here which weren't
# found 'normally', we need to force USE_DISK_BY_ID to
# make sure we're able to actually import them later.
USE_DISK_BY_ID='yes'
if [ -n "$available_pools" ]
then
# Filter out duplicates (pools found with the simple
# "zpool import" but which are also found with
# "zpool import -d ...").
npools=$(echo "$npools" | sed "s,$available_pools,,")
# Add the list to the existing list of
# available pools
available_pools="$available_pools;$npools"
else
available_pools="$npools"
fi
fi
fi
# Filter out any exceptions...
if [ -n "$ZFS_POOL_EXCEPTIONS" ]
then
local found=""
local apools=""
OLD_IFS="$IFS" ; IFS=";"
for pool in $available_pools
do
for exception in $ZFS_POOL_EXCEPTIONS
do
[ "$pool" = "$exception" ] && continue 2
found="$pool"
done
if [ -n "$found" ]
then
if [ -n "$apools" ]
then
apools="$apools;$pool"
else
apools="$pool"
fi
fi
done
IFS="$OLD_IFS"
available_pools="$apools"
fi
# For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set
# to something we can use later with the real import(s). We want to
# make sure we find all by* dirs, BUT by-vdev should be first (if it
# exists).
if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ]
then
local dirs
dirs="$(for dir in $(echo /dev/disk/by-*)
do
# Ignore by-vdev here - we want it first!
echo "$dir" | grep -q /by-vdev && continue
[ ! -d "$dir" ] && continue
printf "%s" "$dir:"
done | sed 's,:$,,g')"
if [ -d "/dev/disk/by-vdev" ]
then
# Add by-vdev at the beginning.
ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:"
fi
# Help with getting LUKS partitions etc imported.
if [ -d "/dev/mapper" ]; then
if [ -n "$ZPOOL_IMPORT_PATH" ]; then
ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH:/dev/mapper:"
else
ZPOOL_IMPORT_PATH="/dev/mapper:"
fi
fi
# ... and /dev at the very end, just for good measure.
ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev"
fi
# Needs to be exported for "zpool" to catch it.
[ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH
# Mount all available pools (except those set in ZFS_POOL_EXCEPTIONS).
#
# If not interactive (run from init - variable init='/sbin/init')
# we get ONE line for all pools being imported, with just a dot
# as status for each pool.
# Example: Importing ZFS pool(s)... [OK]
#
# If it IS interactive (started from the shell manually), then we
# get one line per pool importing.
# Example: Importing ZFS pool pool1 [OK]
# Importing ZFS pool pool2 [OK]
# [etc]
[ -n "$init" ] && zfs_log_begin_msg "Importing ZFS pool(s)"
OLD_IFS="$IFS" ; IFS=";"
for pool in $available_pools
do
[ -z "$pool" ] && continue
# We have pools that haven't been imported - import them
if [ -n "$init" ]
then
# Not interactive - a dot for each pool.
# Except on Gentoo where this doesn't work.
zfs_log_progress_msg "."
else
# Interactive - one 'Importing ...' line per pool
zfs_log_begin_msg "Importing ZFS pool $pool"
fi
# Import by using ZPOOL_IMPORT_PATH (either set above or in
# the config file) _or_ with the 'built in' default search
# paths. This is the preferred way.
# shellcheck disable=SC2086
"$ZPOOL" import -N ${ZPOOL_IMPORT_OPTS} "$pool" 2> /dev/null
r="$?" ; RET=$((RET + r))
if [ "$r" -eq 0 ]
then
# Output success and process the next pool
[ -z "$init" ] && zfs_log_end_msg 0
continue
fi
# We don't want a fail msg here, we're going to try import
# using the cache file soon and that might succeed.
[ ! -f "$ZPOOL_CACHE" ] && zfs_log_end_msg "$RET"
if [ "$r" -gt 0 ] && [ -f "$ZPOOL_CACHE" ]
then
# Failed to import without a cache file. Try WITH...
if [ -z "$init" ] && check_boolean "$VERBOSE_MOUNT"
then
# Interactive + Verbose = more information
zfs_log_progress_msg " using cache file"
fi
# shellcheck disable=SC2086
"$ZPOOL" import -c "$ZPOOL_CACHE" -N ${ZPOOL_IMPORT_OPTS} \
"$pool" 2> /dev/null
r="$?" ; RET=$((RET + r))
if [ "$r" -eq 0 ]
then
[ -z "$init" ] && zfs_log_end_msg 0
continue 3 # Next pool
fi
zfs_log_end_msg "$RET"
fi
done
[ -n "$init" ] && zfs_log_end_msg "$RET"
IFS="$OLD_IFS"
[ -n "$already_imported" ] && [ -z "$available_pools" ] && return 0
return "$RET"
}
do_import()
{
if check_boolean "$ZPOOL_IMPORT_ALL_VISIBLE"
then
do_import_all_visible
else
# This is the default option
do_verbatim_import
fi
}
# Output the status and list of pools
do_status()
{
check_module_loaded "zfs" || exit 0
"$ZPOOL" status && echo "" && "$ZPOOL" list
}
do_start()
{
if check_boolean "$VERBOSE_MOUNT"
then
zfs_log_begin_msg "Checking if ZFS userspace tools present"
fi
if checksystem
then
check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0
check_boolean "$VERBOSE_MOUNT" && \
zfs_log_begin_msg "Loading kernel ZFS infrastructure"
if ! load_module "zfs"
then
check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 1
return 5
fi
check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0
do_import && udev_trigger # just to make sure we get zvols.
return 0
else
return 1
fi
}
# ----------------------------------------------------
if [ ! -e /sbin/openrc-run ]
then
case "$1" in
start)
do_start
;;
stop)
# no-op
;;
status)
do_status
;;
force-reload|condrestart|reload|restart)
# no-op
;;
*)
[ -n "$1" ] && echo "Error: Unknown command $1."
echo "Usage: $0 {start|status}"
exit 3
;;
esac
exit $?
else
# Create wrapper functions since Gentoo doesn't use the case part.
depend() { do_depend; }
start() { do_start; }
status() { do_status; }
fi
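As a hedged illustration of how the import logic above can be steered: the variables it consults (ZPOOL_IMPORT_ALL_VISIBLE, ZPOOL_IMPORT_PATH, ZPOOL_IMPORT_OPTS, USE_DISK_BY_ID, ZFS_POOL_EXCEPTIONS) are normally set in the init configuration file sourced by zfs-functions; the /etc/default/zfs path and the pool name below are assumptions for the sketch.
# Example fragment for the init config file (@initconfdir@/zfs,
# commonly /etc/default/zfs or /etc/sysconfig/zfs).
# Import every pool the script can find, not only a verbatim cache import.
ZPOOL_IMPORT_ALL_VISIBLE='yes'
# Search by-vdev first, then by-id, for stable device names.
ZPOOL_IMPORT_PATH='/dev/disk/by-vdev:/dev/disk/by-id'
# Never auto-import this scratch pool at boot (placeholder name).
ZFS_POOL_EXCEPTIONS='scratchpool'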
diff --git a/sys/contrib/openzfs/etc/init.d/zfs-load-key.in b/sys/contrib/openzfs/etc/init.d/zfs-load-key.in
new file mode 100755
index 000000000000..2f8deffdc809
--- /dev/null
+++ b/sys/contrib/openzfs/etc/init.d/zfs-load-key.in
@@ -0,0 +1,131 @@
+#!@DEFAULT_INIT_SHELL@
+#
+# zfs-load-key This script will load/unload the ZFS filesystems' keys.
+#
+# chkconfig: 2345 06 99
+# description: This script will load or unload the ZFS filesystems' keys during
+# system boot/shutdown. Only filesystems with a key path set
+# in the keylocation property are handled. See the zfs(8) man page for details.
+# probe: true
+#
+### BEGIN INIT INFO
+# Provides: zfs-load-key
+# Required-Start: $local_fs zfs-import
+# Required-Stop: $local_fs zfs-import
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# X-Start-Before: zfs-mount
+# X-Stop-After: zfs-zed
+# Short-Description: Load ZFS keys for filesystems and volumes
+# Description: Run the `zfs load-key` or `zfs unload-key` commands.
+### END INIT INFO
+#
+# Released under the 2-clause BSD license.
+#
+# This script is based on debian/zfsutils.zfs.init from the
+# Debian GNU/kFreeBSD zfsutils 8.1-3 package, written by Aurelien Jarno.
+
+# Source the common init script
+. @sysconfdir@/zfs/zfs-functions
+
+# ----------------------------------------------------
+
+do_depend()
+{
+ # bootmisc will log to /var which may be a different zfs than root.
+ before bootmisc logger zfs-mount
+
+ after zfs-import sysfs
+ keyword -lxc -openvz -prefix -vserver
+}
+
+# Load keys for all datasets/filesystems
+do_load_keys()
+{
+ zfs_log_begin_msg "Load ZFS filesystem(s) keys"
+
+ "$ZFS" list -Ho name,encryptionroot,keystatus,keylocation |
+ while IFS=" " read -r name encryptionroot keystatus keylocation; do
+ if [ "$encryptionroot" != "-" ] &&
+ [ "$name" = "$encryptionroot" ] &&
+ [ "$keystatus" = "unavailable" ] &&
+ [ "$keylocation" != "prompt" ] &&
+ [ "$keylocation" != "none" ]
+ then
+ zfs_action "Load key for $encryptionroot" \
+ "$ZFS" load-key "$encryptionroot"
+ fi
+ done
+
+ zfs_log_end_msg 0
+
+ return 0
+}
+
+# Unload keys for all datasets/filesystems
+do_unload_keys()
+{
+ zfs_log_begin_msg "Unload ZFS filesystem(s) keys"
+
+ "$ZFS" list -Ho name,encryptionroot,keystatus | sed '1!G;h;$!d' |
+ while IFS=" " read -r name encryptionroot keystatus; do
+ if [ "$encryptionroot" != "-" ] &&
+ [ "$name" = "$encryptionroot" ] &&
+ [ "$keystatus" = "available" ]
+ then
+ zfs_action "Unload key for $encryptionroot" \
+ "$ZFS" unload-key "$encryptionroot"
+ fi
+ done
+
+ zfs_log_end_msg 0
+
+ return 0
+}
+
+do_start()
+{
+ check_boolean "$ZFS_LOAD_KEY" || exit 0
+
+ check_module_loaded "zfs" || exit 0
+
+ do_load_keys
+}
+
+do_stop()
+{
+ check_boolean "$ZFS_UNLOAD_KEY" || exit 0
+
+ check_module_loaded "zfs" || exit 0
+
+ do_unload_keys
+}
+
+# ----------------------------------------------------
+
+if [ ! -e /sbin/openrc-run ]
+then
+ case "$1" in
+ start)
+ do_start
+ ;;
+ stop)
+ do_stop
+ ;;
+ force-reload|condrestart|reload|restart|status)
+ # no-op
+ ;;
+ *)
+ [ -n "$1" ] && echo "Error: Unknown command $1."
+ echo "Usage: $0 {start|stop}"
+ exit 3
+ ;;
+ esac
+
+ exit $?
+else
+ # Create wrapper functions since Gentoo doesn't use the case part.
+ depend() { do_depend; }
+ start() { do_start; }
+ stop() { do_stop; }
+fi
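For a rough idea of which datasets the do_load_keys loop above will act on, the same selection can be reproduced interactively; this is only a sketch, and tank/secure is a placeholder dataset name.
# List encryption roots whose keys are not loaded and whose keylocation
# is a file or URI rather than an interactive prompt (the script's filter).
zfs list -Ho name,encryptionroot,keystatus,keylocation | \
awk '$2 != "-" && $1 == $2 && $3 == "unavailable" && $4 != "prompt" && $4 != "none"'
# Load one of them by hand - the same per-dataset command the script runs.
zfs load-key tank/secure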
diff --git a/sys/contrib/openzfs/etc/systemd/system/.gitignore b/sys/contrib/openzfs/etc/systemd/system/.gitignore
index efada54ad932..4813c65a25a8 100644
--- a/sys/contrib/openzfs/etc/systemd/system/.gitignore
+++ b/sys/contrib/openzfs/etc/systemd/system/.gitignore
@@ -1,3 +1,4 @@
*.service
*.target
*.preset
+*.timer
diff --git a/sys/contrib/openzfs/etc/systemd/system/Makefile.am b/sys/contrib/openzfs/etc/systemd/system/Makefile.am
index c374a52ac7db..5e65e1db420c 100644
--- a/sys/contrib/openzfs/etc/systemd/system/Makefile.am
+++ b/sys/contrib/openzfs/etc/systemd/system/Makefile.am
@@ -1,21 +1,24 @@
include $(top_srcdir)/config/Substfiles.am
systemdpreset_DATA = \
50-zfs.preset
systemdunit_DATA = \
zfs-zed.service \
zfs-import-cache.service \
zfs-import-scan.service \
zfs-mount.service \
zfs-share.service \
zfs-volume-wait.service \
zfs-import.target \
zfs-volumes.target \
- zfs.target
+ zfs.target \
+ zfs-scrub-monthly@.timer \
+ zfs-scrub-weekly@.timer \
+ zfs-scrub@.service
SUBSTFILES += $(systemdpreset_DATA) $(systemdunit_DATA)
install-data-hook:
$(MKDIR_P) "$(DESTDIR)$(systemdunitdir)"
ln -sf /dev/null "$(DESTDIR)$(systemdunitdir)/zfs-import.service"
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-scrub-monthly@.timer.in b/sys/contrib/openzfs/etc/systemd/system/zfs-scrub-monthly@.timer.in
new file mode 100644
index 000000000000..903068468278
--- /dev/null
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-scrub-monthly@.timer.in
@@ -0,0 +1,12 @@
+[Unit]
+Description=Monthly zpool scrub timer for %i
+Documentation=man:zpool-scrub(8)
+
+[Timer]
+OnCalendar=monthly
+Persistent=true
+RandomizedDelaySec=1h
+Unit=zfs-scrub@%i.service
+
+[Install]
+WantedBy=timers.target
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-scrub-weekly@.timer.in b/sys/contrib/openzfs/etc/systemd/system/zfs-scrub-weekly@.timer.in
new file mode 100644
index 000000000000..ede699500599
--- /dev/null
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-scrub-weekly@.timer.in
@@ -0,0 +1,12 @@
+[Unit]
+Description=Weekly zpool scrub timer for %i
+Documentation=man:zpool-scrub(8)
+
+[Timer]
+OnCalendar=weekly
+Persistent=true
+RandomizedDelaySec=1h
+Unit=zfs-scrub@%i.service
+
+[Install]
+WantedBy=timers.target
diff --git a/sys/contrib/openzfs/etc/systemd/system/zfs-scrub@.service.in b/sys/contrib/openzfs/etc/systemd/system/zfs-scrub@.service.in
new file mode 100644
index 000000000000..bebe91d746ae
--- /dev/null
+++ b/sys/contrib/openzfs/etc/systemd/system/zfs-scrub@.service.in
@@ -0,0 +1,14 @@
+[Unit]
+Description=zpool scrub on %i
+Documentation=man:zpool-scrub(8)
+Requires=zfs.target
+After=zfs.target
+ConditionACPower=true
+ConditionPathIsDirectory=/sys/module/zfs
+
+[Service]
+ExecStart=/bin/sh -c '\
+if @sbindir@/zpool status %i | grep "scrub in progress"; then\
+exec @sbindir@/zpool wait -t scrub %i;\
+else exec @sbindir@/zpool scrub -w %i; fi'
+ExecStop=-/bin/sh -c '@sbindir@/zpool scrub -p %i 2>/dev/null || true'
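To put these units to use, a per-pool timer instance is enabled by pool name; a minimal sketch, assuming a pool called tank and that the units are installed in the systemd unit directory:
# Scrub 'tank' weekly; Persistent=true catches up after downtime and
# RandomizedDelaySec spreads the start over up to an hour.
systemctl enable --now zfs-scrub-weekly@tank.timer
# Or monthly instead:
# systemctl enable --now zfs-scrub-monthly@tank.timer
# A one-off scrub through the same templated service:
systemctl start zfs-scrub@tank.service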
diff --git a/sys/contrib/openzfs/etc/zfs/zfs-functions.in b/sys/contrib/openzfs/etc/zfs/zfs-functions.in
index 2fb065afdb9a..30441dc35d4b 100644
--- a/sys/contrib/openzfs/etc/zfs/zfs-functions.in
+++ b/sys/contrib/openzfs/etc/zfs/zfs-functions.in
@@ -1,431 +1,432 @@
-# This is a script with common functions etc used by zfs-import, zfs-mount,
-# zfs-share and zfs-zed.
+# This is a script with common functions etc used by zfs-import, zfs-load-key,
+# zfs-mount, zfs-share and zfs-zed.
#
# It is _NOT_ to be called independently
#
# Released under the 2-clause BSD license.
#
# This script is based on debian/zfsutils.zfs.init from the
# Debian GNU/kFreeBSD zfsutils 8.1-3 package, written by Aurelien Jarno.
PATH=/sbin:/bin:/usr/bin:/usr/sbin
# Source function library
if [ -f /etc/rc.d/init.d/functions ]; then
# RedHat and derivatives
. /etc/rc.d/init.d/functions
elif [ -L /etc/init.d/functions.sh ]; then
# Gentoo
. /etc/init.d/functions.sh
elif [ -f /lib/lsb/init-functions ]; then
# LSB, Debian, and derivatives
. /lib/lsb/init-functions
fi
# Of course the functions we need are called differently
# on different distributions - it would be way too easy
# otherwise!!
if type log_failure_msg > /dev/null 2>&1 ; then
# LSB functions - fall through
zfs_log_begin_msg() { log_begin_msg "$1"; }
zfs_log_end_msg() { log_end_msg "$1"; }
zfs_log_failure_msg() { log_failure_msg "$1"; }
zfs_log_progress_msg() { log_progress_msg "$1"; }
elif type success > /dev/null 2>&1 ; then
# Fedora/RedHat functions
zfs_set_ifs() {
# For some reason, the init function library has a problem
# with a changed IFS, so this function works around that.
local tIFS="$1"
if [ -n "$tIFS" ]
then
TMP_IFS="$IFS"
IFS="$tIFS"
fi
}
zfs_log_begin_msg() { printf "%s" "$1 "; }
zfs_log_end_msg() {
zfs_set_ifs "$OLD_IFS"
if [ "$1" -eq 0 ]; then
success
else
failure
fi
echo
zfs_set_ifs "$TMP_IFS"
}
zfs_log_failure_msg() {
zfs_set_ifs "$OLD_IFS"
failure
echo
zfs_set_ifs "$TMP_IFS"
}
zfs_log_progress_msg() { printf "%s" "$""$1"; }
elif type einfo > /dev/null 2>&1 ; then
# Gentoo functions
zfs_log_begin_msg() { ebegin "$1"; }
zfs_log_end_msg() { eend "$1"; }
zfs_log_failure_msg() { eend "$1"; }
# zfs_log_progress_msg() { printf "%s" "$1"; }
zfs_log_progress_msg() { :; }
else
# Unknown - simple substitutes.
zfs_log_begin_msg() { printf "%s" "$1"; }
zfs_log_end_msg() {
ret=$1
if [ "$ret" -ge 1 ]; then
echo " failed!"
else
echo " success"
fi
return "$ret"
}
zfs_log_failure_msg() { echo "$1"; }
zfs_log_progress_msg() { printf "%s" "$1"; }
fi
# Paths to what we need
ZFS="@sbindir@/zfs"
ZED="@sbindir@/zed"
ZPOOL="@sbindir@/zpool"
ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache"
# Sensible defaults
+ZFS_LOAD_KEY='yes'
+ZFS_UNLOAD_KEY='no'
ZFS_MOUNT='yes'
ZFS_UNMOUNT='yes'
ZFS_SHARE='yes'
ZFS_UNSHARE='yes'
# Source zfs configuration, overriding the defaults
if [ -f @initconfdir@/zfs ]; then
. @initconfdir@/zfs
fi
# ----------------------------------------------------
-export ZFS ZED ZPOOL ZPOOL_CACHE ZFS_MOUNT ZFS_UNMOUNT ZFS_SHARE ZFS_UNSHARE
+export ZFS ZED ZPOOL ZPOOL_CACHE ZFS_LOAD_KEY ZFS_UNLOAD_KEY ZFS_MOUNT ZFS_UNMOUNT \
+ ZFS_SHARE ZFS_UNSHARE
zfs_action()
{
local MSG="$1"; shift
local CMD="$*"
local ret
zfs_log_begin_msg "$MSG "
$CMD
ret=$?
if [ "$ret" -eq 0 ]; then
zfs_log_end_msg $ret
else
zfs_log_failure_msg $ret
fi
return $ret
}
# Returns
# 0 if daemon has been started
# 1 if daemon was already running
# 2 if daemon could not be started
# 3 if unsupported
#
zfs_daemon_start()
{
local PIDFILE="$1"; shift
local DAEMON_BIN="$1"; shift
if type start-stop-daemon > /dev/null 2>&1 ; then
# LSB functions
start-stop-daemon --start --quiet --pidfile "$PIDFILE" \
--exec "$DAEMON_BIN" --test > /dev/null || return 1
# shellcheck disable=SC2086
start-stop-daemon --start --quiet --exec "$DAEMON_BIN" -- \
"$@" || return 2
# On Debian, there's a 'sendsigs' script that will
# kill basically everything quite early and zed is stopped
# much later than that. We don't want zed to be among them,
# so add the zed pid to the list of pids to ignore.
if [ -f "$PIDFILE" ] && [ -d /run/sendsigs.omit.d ]
then
ln -sf "$PIDFILE" /run/sendsigs.omit.d/zed
fi
elif type daemon > /dev/null 2>&1 ; then
# Fedora/RedHat functions
# shellcheck disable=SC2086
daemon --pidfile "$PIDFILE" "$DAEMON_BIN" "$@"
return $?
else
# Unsupported
return 3
fi
return 0
}
# Returns
# 0 if daemon has been stopped
# 1 if daemon was already stopped
# 2 if daemon could not be stopped
# 3 if unsupported
#
zfs_daemon_stop()
{
local PIDFILE="$1"
local DAEMON_BIN="$2"
local DAEMON_NAME="$3"
if type start-stop-daemon > /dev/null 2>&1 ; then
# LSB functions
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile "$PIDFILE" --name "$DAEMON_NAME"
ret="$?"
[ "$ret" = 0 ] && rm -f "$PIDFILE"
return "$ret"
elif type killproc > /dev/null 2>&1 ; then
# Fedora/RedHat functions
killproc -p "$PIDFILE" "$DAEMON_NAME"
ret="$?"
[ "$ret" = 0 ] && rm -f "$PIDFILE"
return "$ret"
else
# Unsupported
return 3
fi
return 0
}
# Returns status
zfs_daemon_status()
{
local PIDFILE="$1"
local DAEMON_BIN="$2"
local DAEMON_NAME="$3"
if type status_of_proc > /dev/null 2>&1 ; then
# LSB functions
status_of_proc "$DAEMON_NAME" "$DAEMON_BIN"
return $?
elif type status > /dev/null 2>&1 ; then
# Fedora/RedHat functions
status -p "$PIDFILE" "$DAEMON_NAME"
return $?
else
# Unsupported
return 3
fi
return 0
}
zfs_daemon_reload()
{
local PIDFILE="$1"
local DAEMON_NAME="$2"
if type start-stop-daemon > /dev/null 2>&1 ; then
# LSB functions
start-stop-daemon --stop --signal 1 --quiet \
--pidfile "$PIDFILE" --name "$DAEMON_NAME"
return $?
elif type killproc > /dev/null 2>&1 ; then
# Fedora/RedHat functions
killproc -p "$PIDFILE" "$DAEMON_NAME" -HUP
return $?
else
# Unsupported
return 3
fi
return 0
}
zfs_installed()
{
if [ ! -x "$ZPOOL" ]; then
return 1
else
# Test if it works (will catch missing/broken libs etc)
"$ZPOOL" -? > /dev/null 2>&1
return $?
fi
if [ ! -x "$ZFS" ]; then
return 2
else
# Test if it works (will catch missing/broken libs etc)
"$ZFS" -? > /dev/null 2>&1
return $?
fi
return 0
}
# Trigger udev and wait for it to settle.
udev_trigger()
{
if [ -x /sbin/udevadm ]; then
/sbin/udevadm trigger --action=change --subsystem-match=block
/sbin/udevadm settle
elif [ -x /sbin/udevsettle ]; then
/sbin/udevtrigger
/sbin/udevsettle
fi
}
# Do a lot of checks to make sure it's 'safe' to continue with the import.
checksystem()
{
if grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no|0)( |$)' /proc/cmdline;
then
# Called with zfs=(off|no|0) - bail because we don't
# want anything imported, mounted or shared.
# HOWEVER, only do this if we're called at the boot up
# (from init), not if we're running interactively (as in
# from the shell - we know what we're doing).
# shellcheck disable=SC2154
[ -n "$init" ] && exit 3
fi
# Check if ZFS is installed.
zfs_installed || return 5
# Just make sure that /dev/zfs is created.
udev_trigger
return 0
}
get_root_pool()
{
# shellcheck disable=SC2046
set -- $(mount | grep ' on / ')
[ "$5" = "zfs" ] && echo "${1%%/*}"
}
# Check if a variable is 'yes' (any case) or '1'
# Returns TRUE if set.
check_boolean()
{
local var="$1"
echo "$var" | grep -Eiq "^yes$|^on$|^true$|^1$" && return 0 || return 1
}
check_module_loaded()
{
module="$1"
[ -r "/sys/module/${module}/version" ] && return 0 || return 1
}
load_module()
{
module="$1"
# Load the zfs module stack
if ! check_module_loaded "$module"; then
if ! /sbin/modprobe "$module"; then
return 5
fi
fi
return 0
}
# first parameter is a regular expression that filters mtab
read_mtab()
{
local match="$1"
local fs mntpnt fstype opts rest
# Unset all MTAB_* variables
# shellcheck disable=SC2046
- unset $(env | grep ^MTAB_ | sed 's,=.*,,')
+ unset $(env | sed -e '/^MTAB_/!d' -e 's,=.*,,')
while read -r fs mntpnt fstype opts rest; do
if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then
# * Fix problems (!?) in the mounts file. It will record
# 'rpool 1' as 'rpool\0401' instead of 'rpool\00401',
# which seems to be correct (at least as far as
# 'printf' is concerned).
# * We need to use the external echo, because the
# internal one would interpret the backslash code
# (incorrectly), giving us a stray control character instead.
mntpnt=$(/bin/echo "$mntpnt" | sed 's,\\0,\\00,g')
fs=$(/bin/echo "$fs" | sed 's,\\0,\\00,')
# Remove 'unwanted' characters.
- mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \
- -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g')
- fs=$(printf '%b\n' "$fs")
+ mntpnt=$(printf '%b' "$mntpnt" | tr -d '/. -')
+ fs=$(printf '%b' "$fs")
# Set the variable.
eval export "MTAB_$mntpnt=\"$fs\""
fi
done < /proc/self/mounts
}
in_mtab()
{
local mntpnt="$1"
# Remove 'unwanted' characters.
- mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \
- -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g')
+ mntpnt=$(printf '%b' "$mntpnt" | tr -d '/. -')
local var
var="$(eval echo "MTAB_$mntpnt")"
[ "$(eval echo "$""$var")" != "" ]
return "$?"
}
# first parameter is a regular expression that filters fstab
read_fstab()
{
local match="$1"
local i var
# Unset all FSTAB_* variables
# shellcheck disable=SC2046
- unset $(env | grep ^FSTAB_ | sed 's,=.*,,')
+ unset $(env | sed -e '/^FSTAB_/!d' -e 's,=.*,,')
i=0
while read -r fs mntpnt fstype opts; do
echo "$fs" | grep -qE '^#|^$' && continue
echo "$mntpnt" | grep -qE '^none|^swap' && continue
echo "$fstype" | grep -qE '^swap' && continue
if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then
eval export "FSTAB_dev_$i=$fs"
- fs=$(printf '%b\n' "$fs" | sed 's,/,_,g')
+ fs=$(printf '%b' "$fs" | tr '/' '_')
eval export "FSTAB_$i=$mntpnt"
i=$((i + 1))
fi
done < /etc/fstab
}
in_fstab()
{
local var
var="$(eval echo "FSTAB_$1")"
[ "${var}" != "" ]
return $?
}
is_mounted()
{
local mntpt="$1"
local mp
while read -r _ mp _; do
[ "$mp" = "$mntpt" ] && return 0
done < /proc/self/mounts
return 1
}
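As a hedged sketch of how the new defaults interact with the configuration file sourced above: any of the ZFS_* booleans can be overridden in @initconfdir@/zfs, and check_boolean() accepts 'yes', 'on', 'true' or '1' in any case.
# Example override fragment for @initconfdir@/zfs (path is distro-specific).
# Load encryption keys at boot - already the default.
ZFS_LOAD_KEY='yes'
# Also unload them again on shutdown, which is off by default.
ZFS_UNLOAD_KEY='1'   # any of yes/on/true/1 satisfies check_boolean()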
diff --git a/sys/contrib/openzfs/include/libzfs_impl.h b/sys/contrib/openzfs/include/libzfs_impl.h
index dfb63285c1fd..96b11dad137c 100644
--- a/sys/contrib/openzfs/include/libzfs_impl.h
+++ b/sys/contrib/openzfs/include/libzfs_impl.h
@@ -1,266 +1,268 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018 Datto Inc.
* Copyright 2020 Joyent, Inc.
*/
#ifndef _LIBZFS_IMPL_H
#define _LIBZFS_IMPL_H
#include <sys/fs/zfs.h>
#include <sys/spa.h>
#include <sys/nvpair.h>
#include <sys/dmu.h>
#include <sys/zfs_ioctl.h>
#include <regex.h>
#include <libuutil.h>
#include <libzfs.h>
#include <libshare.h>
#include <libzfs_core.h>
#ifdef __cplusplus
extern "C" {
#endif
struct libzfs_handle {
int libzfs_error;
int libzfs_fd;
FILE *libzfs_mnttab;
zpool_handle_t *libzfs_pool_handles;
uu_avl_pool_t *libzfs_ns_avlpool;
uu_avl_t *libzfs_ns_avl;
uint64_t libzfs_ns_gen;
int libzfs_desc_active;
char libzfs_action[1024];
char libzfs_desc[1024];
int libzfs_printerr;
int libzfs_storeerr; /* stuff error messages into buffer */
boolean_t libzfs_mnttab_enable;
/*
* We need a lock to handle the case where parallel mount
* threads are populating the mnttab cache simultaneously. The
* lock only protects the integrity of the avl tree, and does
* not protect the contents of the mnttab entries themselves.
*/
pthread_mutex_t libzfs_mnttab_cache_lock;
avl_tree_t libzfs_mnttab_cache;
int libzfs_pool_iter;
char libzfs_chassis_id[256];
boolean_t libzfs_prop_debug;
regex_t libzfs_urire;
uint64_t libzfs_max_nvlist;
+ void *libfetch;
+ char *libfetch_load_error;
};
struct zfs_handle {
libzfs_handle_t *zfs_hdl;
zpool_handle_t *zpool_hdl;
char zfs_name[ZFS_MAX_DATASET_NAME_LEN];
zfs_type_t zfs_type; /* type including snapshot */
zfs_type_t zfs_head_type; /* type excluding snapshot */
dmu_objset_stats_t zfs_dmustats;
nvlist_t *zfs_props;
nvlist_t *zfs_user_props;
nvlist_t *zfs_recvd_props;
boolean_t zfs_mntcheck;
char *zfs_mntopts;
uint8_t *zfs_props_table;
};
/*
* This is different from checking zfs_type, because it will also catch
* snapshots of volumes.
*/
#define ZFS_IS_VOLUME(zhp) ((zhp)->zfs_head_type == ZFS_TYPE_VOLUME)
struct zpool_handle {
libzfs_handle_t *zpool_hdl;
zpool_handle_t *zpool_next;
char zpool_name[ZFS_MAX_DATASET_NAME_LEN];
int zpool_state;
size_t zpool_config_size;
nvlist_t *zpool_config;
nvlist_t *zpool_old_config;
nvlist_t *zpool_props;
diskaddr_t zpool_start_block;
};
typedef enum {
PROTO_NFS = 0,
PROTO_SMB = 1,
PROTO_END = 2
} zfs_share_proto_t;
/*
* The following can be used as a bitmask and any new values
* added must preserve that capability.
*/
typedef enum {
SHARED_NOT_SHARED = 0x0,
SHARED_NFS = 0x2,
SHARED_SMB = 0x4
} zfs_share_type_t;
typedef int (*zfs_uri_handler_fn_t)(struct libzfs_handle *, const char *,
const char *, zfs_keyformat_t, boolean_t, uint8_t **, size_t *);
typedef struct zfs_uri_handler {
const char *zuh_scheme;
zfs_uri_handler_fn_t zuh_handler;
} zfs_uri_handler_t;
#define CONFIG_BUF_MINSIZE 262144
int zfs_error(libzfs_handle_t *, int, const char *);
int zfs_error_fmt(libzfs_handle_t *, int, const char *, ...);
void zfs_error_aux(libzfs_handle_t *, const char *, ...);
void *zfs_alloc(libzfs_handle_t *, size_t);
void *zfs_realloc(libzfs_handle_t *, void *, size_t, size_t);
char *zfs_asprintf(libzfs_handle_t *, const char *, ...);
char *zfs_strdup(libzfs_handle_t *, const char *);
int no_memory(libzfs_handle_t *);
int zfs_standard_error(libzfs_handle_t *, int, const char *);
int zfs_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
void zfs_setprop_error(libzfs_handle_t *, zfs_prop_t, int, char *);
int zpool_standard_error(libzfs_handle_t *, int, const char *);
int zpool_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
zfs_handle_t *make_dataset_handle_zc(libzfs_handle_t *, zfs_cmd_t *);
zfs_handle_t *make_dataset_simple_handle_zc(zfs_handle_t *, zfs_cmd_t *);
int zprop_parse_value(libzfs_handle_t *, nvpair_t *, int, zfs_type_t,
nvlist_t *, char **, uint64_t *, const char *);
int zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp,
zfs_type_t type);
/*
* Use this changelist_gather() flag to force attempting mounts
* on each change node regardless of whether or not it is currently
* mounted.
*/
#define CL_GATHER_MOUNT_ALWAYS 1
/*
* changelist_gather() flag to force it to iterate on mounted datasets only
*/
#define CL_GATHER_ITER_MOUNTED 2
/*
* Use this changelist_gather() flag to prevent unmounting of file systems.
*/
#define CL_GATHER_DONT_UNMOUNT 4
typedef struct prop_changelist prop_changelist_t;
int zcmd_alloc_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, size_t);
int zcmd_write_src_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *);
int zcmd_write_conf_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *);
int zcmd_expand_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *);
int zcmd_read_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t **);
void zcmd_free_nvlists(zfs_cmd_t *);
int changelist_prefix(prop_changelist_t *);
int changelist_postfix(prop_changelist_t *);
void changelist_rename(prop_changelist_t *, const char *, const char *);
void changelist_remove(prop_changelist_t *, const char *);
void changelist_free(prop_changelist_t *);
prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int, int);
int changelist_unshare(prop_changelist_t *, zfs_share_proto_t *);
int changelist_haszonedchild(prop_changelist_t *);
void remove_mountpoint(zfs_handle_t *);
int create_parents(libzfs_handle_t *, char *, int);
boolean_t isa_child_of(const char *dataset, const char *parent);
zfs_handle_t *make_dataset_handle(libzfs_handle_t *, const char *);
zfs_handle_t *make_bookmark_handle(zfs_handle_t *, const char *,
nvlist_t *props);
int zpool_open_silent(libzfs_handle_t *, const char *, zpool_handle_t **);
boolean_t zpool_name_valid(libzfs_handle_t *, boolean_t, const char *);
int zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
boolean_t modifying);
void namespace_clear(libzfs_handle_t *);
extern int zfs_parse_options(char *, zfs_share_proto_t);
extern int zfs_unshare_proto(zfs_handle_t *,
const char *, zfs_share_proto_t *);
typedef struct {
zfs_prop_t p_prop;
char *p_name;
int p_share_err;
int p_unshare_err;
} proto_table_t;
typedef struct differ_info {
zfs_handle_t *zhp;
char *fromsnap;
char *frommnt;
char *tosnap;
char *tomnt;
char *ds;
char *dsmnt;
char *tmpsnap;
char errbuf[1024];
boolean_t isclone;
boolean_t scripted;
boolean_t classify;
boolean_t timestamped;
uint64_t shares;
int zerr;
int cleanupfd;
int outputfd;
int datafd;
} differ_info_t;
extern proto_table_t proto_table[PROTO_END];
extern int do_mount(zfs_handle_t *zhp, const char *mntpt, char *opts,
int flags);
extern int do_unmount(const char *mntpt, int flags);
extern int zfs_mount_delegation_check(void);
extern int zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto);
extern int unshare_one(libzfs_handle_t *hdl, const char *name,
const char *mountpoint, zfs_share_proto_t proto);
extern boolean_t zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
zprop_source_t *source, int flags);
extern zfs_share_type_t is_shared(const char *mountpoint,
zfs_share_proto_t proto);
extern int libzfs_load_module(void);
extern int zpool_relabel_disk(libzfs_handle_t *hdl, const char *path,
const char *msg);
extern int find_shares_object(differ_info_t *di);
extern void libzfs_set_pipe_max(int infd);
extern void zfs_commit_proto(zfs_share_proto_t *);
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_IMPL_H */
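The new libfetch and libfetch_load_error members of libzfs_handle (together with the libcurl build dependency added to the CI workflows) appear to support fetching raw keys from http(s) keylocation URIs; a hypothetical usage sketch, with the pool, dataset and URL as placeholders:
# Create an encrypted dataset whose key is fetched over HTTPS when loaded.
zfs create -o encryption=on -o keyformat=raw \
-o keylocation=https://keys.example.com/tank-secure.key tank/secure
# Later (e.g. at boot) the key is retrieved from the same URL:
zfs load-key tank/secure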
diff --git a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h
index 1ac595aa15dd..d2c900854acb 100644
--- a/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h
+++ b/sys/contrib/openzfs/include/os/freebsd/spl/sys/vnode.h
@@ -1,228 +1,227 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _OPENSOLARIS_SYS_VNODE_H_
#define _OPENSOLARIS_SYS_VNODE_H_
struct vnode;
struct vattr;
struct xucred;
typedef struct flock flock64_t;
typedef struct vnode vnode_t;
typedef struct vattr vattr_t;
typedef enum vtype vtype_t;
#include <sys/types.h>
#include <sys/queue.h>
#include_next <sys/sdt.h>
#include <sys/namei.h>
enum symfollow { NO_FOLLOW = NOFOLLOW };
#define NOCRED ((struct ucred *)0) /* no credential available */
#define F_FREESP 11 /* Free file space */
#include <sys/proc.h>
#include <sys/vnode_impl.h>
#ifndef IN_BASE
#include_next <sys/vnode.h>
#endif
#include <sys/mount.h>
#include <sys/cred.h>
#include <sys/fcntl.h>
#include <sys/refcount.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/syscallsubr.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
typedef struct vop_vector vnodeops_t;
#define VOP_FID VOP_VPTOFH
#define vop_fid vop_vptofh
#define vop_fid_args vop_vptofh_args
#define a_fid a_fhp
#define rootvfs (rootvnode == NULL ? NULL : rootvnode->v_mount)
#ifndef IN_BASE
static __inline int
vn_is_readonly(vnode_t *vp)
{
return (vp->v_mount->mnt_flag & MNT_RDONLY);
}
#endif
#define vn_vfswlock(vp) (0)
#define vn_vfsunlock(vp) do { } while (0)
#define vn_ismntpt(vp) \
((vp)->v_type == VDIR && (vp)->v_mountedhere != NULL)
#define vn_mountedvfs(vp) ((vp)->v_mountedhere)
#define vn_has_cached_data(vp) \
((vp)->v_object != NULL && \
(vp)->v_object->resident_page_count > 0)
#ifndef IN_BASE
static __inline void
vn_flush_cached_data(vnode_t *vp, boolean_t sync)
{
#if __FreeBSD_version > 1300054
if (vm_object_mightbedirty(vp->v_object)) {
#else
if (vp->v_object->flags & OBJ_MIGHTBEDIRTY) {
#endif
int flags = sync ? OBJPC_SYNC : 0;
zfs_vmobject_wlock(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, flags);
zfs_vmobject_wunlock(vp->v_object);
}
}
#endif
#define vn_exists(vp) do { } while (0)
#define vn_invalid(vp) do { } while (0)
#define vn_renamepath(tdvp, svp, tnm, lentnm) do { } while (0)
#define vn_free(vp) do { } while (0)
#define vn_matchops(vp, vops) ((vp)->v_op == &(vops))
#define VN_HOLD(v) vref(v)
#define VN_RELE(v) vrele(v)
#define VN_URELE(v) vput(v)
#define vnevent_create(vp, ct) do { } while (0)
#define vnevent_link(vp, ct) do { } while (0)
#define vnevent_remove(vp, dvp, name, ct) do { } while (0)
#define vnevent_rmdir(vp, dvp, name, ct) do { } while (0)
#define vnevent_rename_src(vp, dvp, name, ct) do { } while (0)
#define vnevent_rename_dest(vp, dvp, name, ct) do { } while (0)
#define vnevent_rename_dest_dir(vp, ct) do { } while (0)
#define specvp(vp, rdev, type, cr) (VN_HOLD(vp), (vp))
#define MANDLOCK(vp, mode) (0)
/*
* We will use va_spare in place of Solaris' va_mask.
* This field is initialized in zfs_setattr().
*/
#define va_mask va_spare
/* TODO: va_fileid is shorter than va_nodeid !!! */
#define va_nodeid va_fileid
/* TODO: This field needs conversion! */
#define va_nblocks va_bytes
#define va_blksize va_blocksize
-#define va_seq va_gen
#define MAXOFFSET_T OFF_MAX
#define EXCL 0
#define FCREAT O_CREAT
#define FTRUNC O_TRUNC
#define FEXCL O_EXCL
#ifndef FDSYNC
#define FDSYNC FFSYNC
#endif
#define FRSYNC FFSYNC
#define FSYNC FFSYNC
#define FOFFMAX 0x00
#define FIGNORECASE 0x00
/*
* Attributes of interest to the caller of setattr or getattr.
*/
#define AT_MODE 0x00002
#define AT_UID 0x00004
#define AT_GID 0x00008
#define AT_FSID 0x00010
#define AT_NODEID 0x00020
#define AT_NLINK 0x00040
#define AT_SIZE 0x00080
#define AT_ATIME 0x00100
#define AT_MTIME 0x00200
#define AT_CTIME 0x00400
#define AT_RDEV 0x00800
#define AT_BLKSIZE 0x01000
#define AT_NBLOCKS 0x02000
/* 0x04000 */ /* unused */
#define AT_SEQ 0x08000
/*
* If AT_XVATTR is set then there are additional bits to process in
* the xvattr_t's attribute bitmap. If this is not set then the bitmap
* MUST be ignored. Note that this bit must be set/cleared explicitly.
* That is, setting AT_ALL will NOT set AT_XVATTR.
*/
#define AT_XVATTR 0x10000
#define AT_ALL (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_SEQ)
#define AT_STAT (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
#define AT_TIMES (AT_ATIME|AT_MTIME|AT_CTIME)
#define AT_NOSET (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|\
AT_BLKSIZE|AT_NBLOCKS|AT_SEQ)
#ifndef IN_BASE
static __inline void
vattr_init_mask(vattr_t *vap)
{
vap->va_mask = 0;
if (vap->va_uid != (uid_t)VNOVAL)
vap->va_mask |= AT_UID;
if (vap->va_gid != (gid_t)VNOVAL)
vap->va_mask |= AT_GID;
if (vap->va_size != (u_quad_t)VNOVAL)
vap->va_mask |= AT_SIZE;
if (vap->va_atime.tv_sec != VNOVAL)
vap->va_mask |= AT_ATIME;
if (vap->va_mtime.tv_sec != VNOVAL)
vap->va_mask |= AT_MTIME;
if (vap->va_mode != (uint16_t)VNOVAL)
vap->va_mask |= AT_MODE;
if (vap->va_flags != VNOVAL)
vap->va_mask |= AT_XVATTR;
}
#endif
#define RLIM64_INFINITY 0
static __inline int
vn_rename(char *from, char *to, enum uio_seg seg)
{
ASSERT(seg == UIO_SYSSPACE);
return (kern_renameat(curthread, AT_FDCWD, from, AT_FDCWD, to, seg));
}
#include <sys/vfs.h>
#endif /* _OPENSOLARIS_SYS_VNODE_H_ */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
index f2ae0fcbc21a..6d4c7a09fe82 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
@@ -1,649 +1,769 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
*/
/*
* USER API:
*
* Kernel fpu methods:
* kfpu_allowed()
* kfpu_begin()
* kfpu_end()
* kfpu_init()
* kfpu_fini()
*
* SIMD support:
*
* The following functions should be called to determine whether a CPU feature
* is supported. All functions are usable in kernel and user space.
* If a SIMD algorithm uses more than one instruction set,
* all relevant feature test functions should be called.
*
* Supported features:
* zfs_sse_available()
* zfs_sse2_available()
* zfs_sse3_available()
* zfs_ssse3_available()
* zfs_sse4_1_available()
* zfs_sse4_2_available()
*
* zfs_avx_available()
* zfs_avx2_available()
*
* zfs_bmi1_available()
* zfs_bmi2_available()
*
* zfs_avx512f_available()
* zfs_avx512cd_available()
* zfs_avx512er_available()
* zfs_avx512pf_available()
* zfs_avx512bw_available()
* zfs_avx512dq_available()
* zfs_avx512vl_available()
* zfs_avx512ifma_available()
* zfs_avx512vbmi_available()
*
* NOTE(AVX-512VL): If using AVX-512 instructions with 128-bit registers,
* also add zfs_avx512vl_available() to the feature check.
*/
#ifndef _LINUX_SIMD_X86_H
#define _LINUX_SIMD_X86_H
/* only for __x86 */
#if defined(__x86)
#include <sys/types.h>
#include <asm/cpufeature.h>
/*
* Disable the WARN_ON_FPU() macro to prevent additional dependencies
* when providing the kfpu_* functions. Relevant warnings are included
* as appropriate and are unconditionally enabled.
*/
#if defined(CONFIG_X86_DEBUG_FPU) && !defined(KERNEL_EXPORTS_X86_FPU)
#undef CONFIG_X86_DEBUG_FPU
#endif
#if defined(HAVE_KERNEL_FPU_API_HEADER)
#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#if defined(HAVE_KERNEL_FPU_XCR_HEADER)
#include <asm/fpu/xcr.h>
#endif
#else
#include <asm/i387.h>
#include <asm/xcr.h>
#endif
/*
* The following cases are for kernels which export either the
* kernel_fpu_* or __kernel_fpu_* functions.
*/
#if defined(KERNEL_EXPORTS_X86_FPU)
#define kfpu_allowed() 1
#define kfpu_init() 0
#define kfpu_fini() ((void) 0)
#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define kfpu_begin() \
{ \
preempt_disable(); \
__kernel_fpu_begin(); \
}
#define kfpu_end() \
{ \
__kernel_fpu_end(); \
preempt_enable(); \
}
#elif defined(HAVE_KERNEL_FPU)
#define kfpu_begin() kernel_fpu_begin()
#define kfpu_end() kernel_fpu_end()
#else
/*
* This case is unreachable. When KERNEL_EXPORTS_X86_FPU is defined then
* either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
*/
#error "Unreachable kernel configuration"
#endif
#else /* defined(KERNEL_EXPORTS_X86_FPU) */
/*
* When the kernel_fpu_* symbols are unavailable then provide our own
* versions which allow the FPU to be safely used.
*/
+#if defined(HAVE_KERNEL_FPU_INTERNAL) || defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
+
+#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
+/*
+ * Some sanity checks.
+ * HAVE_KERNEL_FPU_INTERNAL and HAVE_KERNEL_FPU_XSAVE_INTERNAL are mutually exclusive.
+ */
#if defined(HAVE_KERNEL_FPU_INTERNAL)
+#error "HAVE_KERNEL_FPU_INTERNAL and HAVE_KERNEL_FPU_XSAVE_INTERNAL defined"
+#endif
+/*
+ * For kernels >= 5.16 we have to use inline assembly with the XSAVE{,OPT,S}
+ * instructions, so we need the toolchain to support at least XSAVE.
+ */
+#if !defined(HAVE_XSAVE)
+#error "Toolchain needs to support the XSAVE assembler instruction"
+#endif
+#endif
#include <linux/mm.h>
+#include <linux/slab.h>
extern union fpregs_state **zfs_kfpu_fpregs;
/*
* Initialize per-cpu variables to store FPU state.
*/
static inline void
kfpu_fini(void)
{
int cpu;
for_each_possible_cpu(cpu) {
if (zfs_kfpu_fpregs[cpu] != NULL) {
free_pages((unsigned long)zfs_kfpu_fpregs[cpu],
get_order(sizeof (union fpregs_state)));
}
}
kfree(zfs_kfpu_fpregs);
}
static inline int
kfpu_init(void)
{
zfs_kfpu_fpregs = kzalloc(num_possible_cpus() *
sizeof (union fpregs_state *), GFP_KERNEL);
if (zfs_kfpu_fpregs == NULL)
return (-ENOMEM);
/*
* The fxsave and xsave operations require 16-/64-byte alignment of
* the target memory. Since kmalloc() provides no alignment
* guarantee, use alloc_pages_node() instead.
*/
unsigned int order = get_order(sizeof (union fpregs_state));
int cpu;
for_each_possible_cpu(cpu) {
struct page *page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_ZERO, order);
if (page == NULL) {
kfpu_fini();
return (-ENOMEM);
}
zfs_kfpu_fpregs[cpu] = page_address(page);
}
return (0);
}
#define kfpu_allowed() 1
+#if defined(HAVE_KERNEL_FPU_INTERNAL)
#define ex_handler_fprestore ex_handler_default
+#endif
/*
* FPU save and restore instructions.
*/
#define __asm __asm__ __volatile__
#define kfpu_fxsave(addr) __asm("fxsave %0" : "=m" (*(addr)))
#define kfpu_fxsaveq(addr) __asm("fxsaveq %0" : "=m" (*(addr)))
#define kfpu_fnsave(addr) __asm("fnsave %0; fwait" : "=m" (*(addr)))
#define kfpu_fxrstor(addr) __asm("fxrstor %0" : : "m" (*(addr)))
#define kfpu_fxrstorq(addr) __asm("fxrstorq %0" : : "m" (*(addr)))
#define kfpu_frstor(addr) __asm("frstor %0" : : "m" (*(addr)))
#define kfpu_fxsr_clean(rval) __asm("fnclex; emms; fildl %P[addr]" \
: : [addr] "m" (rval));
+#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_save_xsave(struct xregs_state *addr, uint64_t mask)
{
uint32_t low, hi;
int err;
low = mask;
hi = mask >> 32;
XSTATE_XSAVE(addr, low, hi, err);
WARN_ON_ONCE(err);
}
+#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
+
+#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
+#define kfpu_do_xsave(instruction, addr, mask) \
+{ \
+ uint32_t low, hi; \
+ \
+ low = mask; \
+ hi = (uint64_t)(mask) >> 32; \
+ __asm(instruction " %[dst]\n\t" \
+ : \
+ : [dst] "m" (*(addr)), "a" (low), "d" (hi) \
+ : "memory"); \
+}
+#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */
static inline void
kfpu_save_fxsr(struct fxregs_state *addr)
{
if (IS_ENABLED(CONFIG_X86_32))
kfpu_fxsave(addr);
else
kfpu_fxsaveq(addr);
}
static inline void
kfpu_save_fsave(struct fregs_state *addr)
{
kfpu_fnsave(addr);
}
+#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_begin(void)
{
/*
* Preemption and interrupts must be disabled for the critical
* region where the FPU state is being modified.
*/
preempt_disable();
local_irq_disable();
/*
* The current FPU registers need to be preserved by kfpu_begin()
* and restored by kfpu_end(). They are stored in a dedicated
* per-cpu variable, not in the task struct; this allows any user
* FPU state to be correctly preserved and restored.
*/
union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
-
if (static_cpu_has(X86_FEATURE_XSAVE)) {
kfpu_save_xsave(&state->xsave, ~0);
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
kfpu_save_fxsr(&state->fxsave);
} else {
kfpu_save_fsave(&state->fsave);
}
}
+#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
+
+#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
+static inline void
+kfpu_begin(void)
+{
+ /*
+ * Preemption and interrupts must be disabled for the critical
+ * region where the FPU state is being modified.
+ */
+ preempt_disable();
+ local_irq_disable();
+
+ /*
+ * The current FPU registers need to be preserved by kfpu_begin()
+ * and restored by kfpu_end(). They are stored in a dedicated
+ * per-cpu variable, not in the task struct; this allows any user
+ * FPU state to be correctly preserved and restored.
+ */
+ union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
+#if defined(HAVE_XSAVES)
+ if (static_cpu_has(X86_FEATURE_XSAVES)) {
+ kfpu_do_xsave("xsaves", &state->xsave, ~0);
+ return;
+ }
+#endif
+#if defined(HAVE_XSAVEOPT)
+ if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
+ kfpu_do_xsave("xsaveopt", &state->xsave, ~0);
+ return;
+ }
+#endif
+ if (static_cpu_has(X86_FEATURE_XSAVE)) {
+ kfpu_do_xsave("xsave", &state->xsave, ~0);
+ } else if (static_cpu_has(X86_FEATURE_FXSR)) {
+ kfpu_save_fxsr(&state->fxsave);
+ } else {
+ kfpu_save_fsave(&state->fsave);
+ }
+}
+#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */
+#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_restore_xsave(struct xregs_state *addr, uint64_t mask)
{
uint32_t low, hi;
low = mask;
hi = mask >> 32;
XSTATE_XRESTORE(addr, low, hi);
}
+#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
+
+#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
+#define kfpu_do_xrstor(instruction, addr, mask) \
+{ \
+ uint32_t low, hi; \
+ \
+ low = mask; \
+ hi = (uint64_t)(mask) >> 32; \
+ __asm(instruction " %[src]" \
+ : \
+ : [src] "m" (*(addr)), "a" (low), "d" (hi) \
+ : "memory"); \
+}
+#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */
static inline void
kfpu_restore_fxsr(struct fxregs_state *addr)
{
/*
* On AuthenticAMD K7 and K8 processors the fxrstor instruction only
* restores the _x87 FOP, FIP, and FDP registers when an exception
* is pending. Clean the _x87 state to force the restore.
*/
if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK)))
kfpu_fxsr_clean(addr);
if (IS_ENABLED(CONFIG_X86_32)) {
kfpu_fxrstor(addr);
} else {
kfpu_fxrstorq(addr);
}
}
static inline void
kfpu_restore_fsave(struct fregs_state *addr)
{
kfpu_frstor(addr);
}
+#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_end(void)
{
union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
if (static_cpu_has(X86_FEATURE_XSAVE)) {
kfpu_restore_xsave(&state->xsave, ~0);
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
kfpu_restore_fxsr(&state->fxsave);
} else {
kfpu_restore_fsave(&state->fsave);
}
local_irq_enable();
preempt_enable();
}
+#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
+
+#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
+static inline void
+kfpu_end(void)
+{
+ union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
+#if defined(HAVE_XSAVES)
+ if (static_cpu_has(X86_FEATURE_XSAVES)) {
+ kfpu_do_xrstor("xrstors", &state->xsave, ~0);
+ goto out;
+ }
+#endif
+ if (static_cpu_has(X86_FEATURE_XSAVE)) {
+ kfpu_do_xrstor("xrstor", &state->xsave, ~0);
+ } else if (static_cpu_has(X86_FEATURE_FXSR)) {
+ kfpu_save_fxsr(&state->fxsave);
+ } else {
+ kfpu_save_fsave(&state->fsave);
+ }
+out:
+ local_irq_enable();
+ preempt_enable();
+
+}
+#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */
#else
/*
* FPU support is unavailable.
*/
#define kfpu_allowed() 0
#define kfpu_begin() do {} while (0)
#define kfpu_end() do {} while (0)
#define kfpu_init() 0
#define kfpu_fini() ((void) 0)
-#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */
+#endif /* defined(HAVE_KERNEL_FPU_INTERNAL || HAVE_KERNEL_FPU_XSAVE_INTERNAL) */
#endif /* defined(KERNEL_EXPORTS_X86_FPU) */
/*
* Linux kernel provides an interface for CPU feature testing.
*/
/*
* Detect register set support
*/
static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
boolean_t has_osxsave;
uint64_t xcr0;
#if defined(X86_FEATURE_OSXSAVE)
has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
has_osxsave = B_FALSE;
#endif
if (!has_osxsave)
return (B_FALSE);
xcr0 = xgetbv(0);
return ((xcr0 & state) == state);
}
#define _XSTATE_SSE_AVX (0x2 | 0x4)
#define _XSTATE_AVX512 (0xE0 | _XSTATE_SSE_AVX)
#define __ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define __zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)
/*
* Check if SSE instruction set is available
*/
static inline boolean_t
zfs_sse_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM));
}
/*
* Check if SSE2 instruction set is available
*/
static inline boolean_t
zfs_sse2_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM2));
}
/*
* Check if SSE3 instruction set is available
*/
static inline boolean_t
zfs_sse3_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM3));
}
/*
* Check if SSSE3 instruction set is available
*/
static inline boolean_t
zfs_ssse3_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_SSSE3));
}
/*
* Check if SSE4.1 instruction set is available
*/
static inline boolean_t
zfs_sse4_1_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
}
/*
* Check if SSE4.2 instruction set is available
*/
static inline boolean_t
zfs_sse4_2_available(void)
{
return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
}
/*
* Check if AVX instruction set is available
*/
static inline boolean_t
zfs_avx_available(void)
{
return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
}
/*
* Check if AVX2 instruction set is available
*/
static inline boolean_t
zfs_avx2_available(void)
{
return (boot_cpu_has(X86_FEATURE_AVX2) && __ymm_enabled());
}
/*
* Check if BMI1 instruction set is available
*/
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(X86_FEATURE_BMI1)
return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
return (B_FALSE);
#endif
}
/*
* Check if BMI2 instruction set is available
*/
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(X86_FEATURE_BMI2)
return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
return (B_FALSE);
#endif
}
/*
* Check if AES instruction set is available
*/
static inline boolean_t
zfs_aes_available(void)
{
#if defined(X86_FEATURE_AES)
return (!!boot_cpu_has(X86_FEATURE_AES));
#else
return (B_FALSE);
#endif
}
/*
* Check if PCLMULQDQ instruction set is available
*/
static inline boolean_t
zfs_pclmulqdq_available(void)
{
#if defined(X86_FEATURE_PCLMULQDQ)
return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
#else
return (B_FALSE);
#endif
}
/*
* Check if MOVBE instruction is available
*/
static inline boolean_t
zfs_movbe_available(void)
{
#if defined(X86_FEATURE_MOVBE)
return (!!boot_cpu_has(X86_FEATURE_MOVBE));
#else
return (B_FALSE);
#endif
}
/*
* AVX-512 family of instruction sets:
*
* AVX512F Foundation
* AVX512CD Conflict Detection Instructions
* AVX512ER Exponential and Reciprocal Instructions
* AVX512PF Prefetch Instructions
*
* AVX512BW Byte and Word Instructions
* AVX512DQ Double-word and Quadword Instructions
* AVX512VL Vector Length Extensions
*
* AVX512IFMA Integer Fused Multiply Add (Not supported by kernel 4.4)
* AVX512VBMI Vector Byte Manipulation Instructions
*/
/*
* Check if AVX512F instruction set is available
*/
static inline boolean_t
zfs_avx512f_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512F)
has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512CD instruction set is available
*/
static inline boolean_t
zfs_avx512cd_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512CD)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512CD);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512ER instruction set is available
*/
static inline boolean_t
zfs_avx512er_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512ER)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512ER);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512PF instruction set is available
*/
static inline boolean_t
zfs_avx512pf_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512PF)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512PF);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512BW instruction set is available
*/
static inline boolean_t
zfs_avx512bw_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512BW)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512BW);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512DQ instruction set is available
*/
static inline boolean_t
zfs_avx512dq_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512DQ)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512DQ);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512VL instruction set is available
*/
static inline boolean_t
zfs_avx512vl_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512VL)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VL);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512IFMA instruction set is available
*/
static inline boolean_t
zfs_avx512ifma_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512IFMA)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512IFMA);
#endif
return (has_avx512 && __zmm_enabled());
}
/*
* Check if AVX512VBMI instruction set is available
*/
static inline boolean_t
zfs_avx512vbmi_available(void)
{
boolean_t has_avx512 = B_FALSE;
#if defined(X86_FEATURE_AVX512VBMI)
has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512VBMI);
#endif
return (has_avx512 && __zmm_enabled());
}
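/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): callers typically probe the predicates above once, at module
 * initialization, and cache the choice instead of re-checking CPU state on
 * every call.  The example_* names below are hypothetical.
 */
typedef enum example_impl {
	EXAMPLE_IMPL_GENERIC = 0,
	EXAMPLE_IMPL_AVX512BW = 1,
} example_impl_t;

static inline example_impl_t
example_impl_select(void)
{
	/* zfs_avx512bw_available() already requires AVX512F and enabled ZMM. */
	if (zfs_avx512bw_available())
		return (EXAMPLE_IMPL_AVX512BW);
	return (EXAMPLE_IMPL_GENERIC);
}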
#endif /* defined(__x86) */
#endif /* _LINUX_SIMD_X86_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
index 66af2b0b534c..439eec986236 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/uio.h
@@ -1,145 +1,149 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_UIO_H
#define _SPL_UIO_H
#include <sys/debug.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/blkdev_compat.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <asm/uaccess.h>
#include <sys/types.h>
+#if defined(HAVE_VFS_IOV_ITER) && defined(HAVE_FAULT_IN_IOV_ITER_READABLE)
+#define iov_iter_fault_in_readable(a, b) fault_in_iov_iter_readable(a, b)
+#endif
+
typedef struct iovec iovec_t;
typedef enum zfs_uio_rw {
UIO_READ = 0,
UIO_WRITE = 1,
} zfs_uio_rw_t;
typedef enum zfs_uio_seg {
UIO_USERSPACE = 0,
UIO_SYSSPACE = 1,
UIO_BVEC = 2,
#if defined(HAVE_VFS_IOV_ITER)
UIO_ITER = 3,
#endif
} zfs_uio_seg_t;
typedef struct zfs_uio {
union {
const struct iovec *uio_iov;
const struct bio_vec *uio_bvec;
#if defined(HAVE_VFS_IOV_ITER)
struct iov_iter *uio_iter;
#endif
};
int uio_iovcnt;
offset_t uio_loffset;
zfs_uio_seg_t uio_segflg;
boolean_t uio_fault_disable;
uint16_t uio_fmode;
uint16_t uio_extflg;
ssize_t uio_resid;
size_t uio_skip;
} zfs_uio_t;
#define zfs_uio_segflg(u) (u)->uio_segflg
#define zfs_uio_offset(u) (u)->uio_loffset
#define zfs_uio_resid(u) (u)->uio_resid
#define zfs_uio_iovcnt(u) (u)->uio_iovcnt
#define zfs_uio_iovlen(u, idx) (u)->uio_iov[(idx)].iov_len
#define zfs_uio_iovbase(u, idx) (u)->uio_iov[(idx)].iov_base
#define zfs_uio_fault_disable(u, set) (u)->uio_fault_disable = set
#define zfs_uio_rlimit_fsize(z, u) (0)
#define zfs_uio_fault_move(p, n, rw, u) zfs_uiomove((p), (n), (rw), (u))
extern int zfs_uio_prefaultpages(ssize_t, zfs_uio_t *);
static inline void
zfs_uio_setoffset(zfs_uio_t *uio, offset_t off)
{
uio->uio_loffset = off;
}
static inline void
zfs_uio_advance(zfs_uio_t *uio, size_t size)
{
uio->uio_resid -= size;
uio->uio_loffset += size;
}
static inline void
zfs_uio_iovec_init(zfs_uio_t *uio, const struct iovec *iov,
unsigned long nr_segs, offset_t offset, zfs_uio_seg_t seg, ssize_t resid,
size_t skip)
{
ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE);
uio->uio_iov = iov;
uio->uio_iovcnt = nr_segs;
uio->uio_loffset = offset;
uio->uio_segflg = seg;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = resid;
uio->uio_skip = skip;
}
static inline void
zfs_uio_bvec_init(zfs_uio_t *uio, struct bio *bio)
{
uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;
uio->uio_segflg = UIO_BVEC;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = BIO_BI_SIZE(bio);
uio->uio_skip = BIO_BI_SKIP(bio);
}
#if defined(HAVE_VFS_IOV_ITER)
static inline void
zfs_uio_iov_iter_init(zfs_uio_t *uio, struct iov_iter *iter, offset_t offset,
ssize_t resid, size_t skip)
{
uio->uio_iter = iter;
uio->uio_iovcnt = iter->nr_segs;
uio->uio_loffset = offset;
uio->uio_segflg = UIO_ITER;
uio->uio_fault_disable = B_FALSE;
uio->uio_fmode = 0;
uio->uio_extflg = 0;
uio->uio_resid = resid;
uio->uio_skip = skip;
}
#endif
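/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): wrapping a single kernel buffer in a zfs_uio_t.  The buffer,
 * length, and offset are hypothetical; real callers receive them from the
 * VFS or ZPL layers.
 */
static inline void
example_zfs_uio_from_buffer(zfs_uio_t *uio, struct iovec *iov,
    void *buf, size_t len, offset_t off)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	/* One UIO_SYSSPACE segment starting at 'off', nothing skipped. */
	zfs_uio_iovec_init(uio, iov, 1, off, UIO_SYSSPACE, len, 0);
	/*
	 * After copying 'n' bytes a caller would account for the progress
	 * with zfs_uio_advance(uio, n), which decrements uio_resid and
	 * moves uio_loffset forward.
	 */
}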
#endif /* _SPL_UIO_H */
diff --git a/sys/contrib/openzfs/include/sys/dsl_pool.h b/sys/contrib/openzfs/include/sys/dsl_pool.h
index 8249bb8fc633..58fcae65db5a 100644
--- a/sys/contrib/openzfs/include/sys/dsl_pool.h
+++ b/sys/contrib/openzfs/include/sys/dsl_pool.h
@@ -1,199 +1,200 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_DSL_POOL_H
#define _SYS_DSL_POOL_H
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/txg_impl.h>
#include <sys/zfs_context.h>
#include <sys/zio.h>
#include <sys/dnode.h>
#include <sys/ddt.h>
#include <sys/arc.h>
#include <sys/bpobj.h>
#include <sys/bptree.h>
#include <sys/rrwlock.h>
#include <sys/dsl_synctask.h>
#include <sys/mmp.h>
#ifdef __cplusplus
extern "C" {
#endif
extern int zfs_txg_synctime_ms;
struct objset;
struct dsl_dir;
struct dsl_dataset;
struct dsl_pool;
struct dmu_tx;
struct dsl_scan;
struct dsl_crypto_params;
struct dsl_deadlist;
extern unsigned long zfs_dirty_data_max;
extern unsigned long zfs_dirty_data_max_max;
extern int zfs_dirty_data_sync_percent;
extern int zfs_dirty_data_max_percent;
extern int zfs_dirty_data_max_max_percent;
extern int zfs_delay_min_dirty_percent;
extern unsigned long zfs_delay_scale;
/* These macros are for indexing into the zfs_all_blkstats_t. */
#define DMU_OT_DEFERRED DMU_OT_NONE
#define DMU_OT_OTHER DMU_OT_NUMTYPES /* placeholder for DMU_OT() types */
#define DMU_OT_TOTAL (DMU_OT_NUMTYPES + 1)
typedef struct zfs_blkstat {
uint64_t zb_count;
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_gangs;
uint64_t zb_ditto_2_of_2_samevdev;
uint64_t zb_ditto_2_of_3_samevdev;
uint64_t zb_ditto_3_of_3_samevdev;
} zfs_blkstat_t;
typedef struct zfs_all_blkstats {
zfs_blkstat_t zab_type[DN_MAX_LEVELS + 1][DMU_OT_TOTAL + 1];
kmutex_t zab_lock;
} zfs_all_blkstats_t;
typedef struct dsl_pool {
/* Immutable */
spa_t *dp_spa;
struct objset *dp_meta_objset;
struct dsl_dir *dp_root_dir;
struct dsl_dir *dp_mos_dir;
struct dsl_dir *dp_free_dir;
struct dsl_dir *dp_leak_dir;
struct dsl_dataset *dp_origin_snap;
uint64_t dp_root_dir_obj;
struct taskq *dp_zrele_taskq;
struct taskq *dp_unlinked_drain_taskq;
/* No lock needed - sync context only */
blkptr_t dp_meta_rootbp;
uint64_t dp_tmp_userrefs_obj;
bpobj_t dp_free_bpobj;
uint64_t dp_bptree_obj;
uint64_t dp_empty_bpobj;
bpobj_t dp_obsolete_bpobj;
struct dsl_scan *dp_scan;
/* Uses dp_lock */
kmutex_t dp_lock;
kcondvar_t dp_spaceavail_cv;
uint64_t dp_dirty_pertxg[TXG_SIZE];
uint64_t dp_dirty_total;
uint64_t dp_long_free_dirty_pertxg[TXG_SIZE];
uint64_t dp_mos_used_delta;
uint64_t dp_mos_compressed_delta;
uint64_t dp_mos_uncompressed_delta;
/*
* Time of most recently scheduled (furthest in the future)
* wakeup for delayed transactions.
*/
hrtime_t dp_last_wakeup;
/* Has its own locking */
tx_state_t dp_tx;
txg_list_t dp_dirty_datasets;
txg_list_t dp_dirty_zilogs;
txg_list_t dp_dirty_dirs;
txg_list_t dp_sync_tasks;
txg_list_t dp_early_sync_tasks;
taskq_t *dp_sync_taskq;
taskq_t *dp_zil_clean_taskq;
/*
* Protects administrative changes (properties, namespace)
*
* It is only held for write in syncing context. Therefore
* syncing context does not need to ever have it for read, since
* nobody else could possibly have it for write.
*/
rrwlock_t dp_config_rwlock;
zfs_all_blkstats_t *dp_blkstats;
} dsl_pool_t;
int dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp);
int dsl_pool_open(dsl_pool_t *dp);
void dsl_pool_close(dsl_pool_t *dp);
dsl_pool_t *dsl_pool_create(spa_t *spa, nvlist_t *zplprops,
struct dsl_crypto_params *dcp, uint64_t txg);
void dsl_pool_sync(dsl_pool_t *dp, uint64_t txg);
void dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg);
int dsl_pool_sync_context(dsl_pool_t *dp);
uint64_t dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy);
uint64_t dsl_pool_unreserved_space(dsl_pool_t *dp,
zfs_space_check_t slop_policy);
+uint64_t dsl_pool_deferred_space(dsl_pool_t *dp);
void dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx);
void dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg);
void dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp);
void dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg,
const blkptr_t *bpp);
void dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_mos_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp);
void dsl_pool_ckpoint_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp);
boolean_t dsl_pool_need_dirty_delay(dsl_pool_t *dp);
void dsl_pool_config_enter(dsl_pool_t *dp, void *tag);
void dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag);
void dsl_pool_config_exit(dsl_pool_t *dp, void *tag);
boolean_t dsl_pool_config_held(dsl_pool_t *dp);
boolean_t dsl_pool_config_held_writer(dsl_pool_t *dp);
taskq_t *dsl_pool_zrele_taskq(dsl_pool_t *dp);
taskq_t *dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp);
int dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t now, dmu_tx_t *tx);
int dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, dmu_tx_t *tx);
void dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp);
int dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **);
int dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp);
void dsl_pool_rele(dsl_pool_t *dp, void *tag);
void dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx);
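/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): the reader-side pattern for the dp_config_rwlock discipline
 * described above.  "example_config_read" is a hypothetical caller; the
 * tag is passed through unchanged, as with the real callers.
 */
static inline void
example_config_read(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_enter(dp, tag);
	ASSERT(dsl_pool_config_held(dp));
	/* ... namespace/property reads go here ... */
	dsl_pool_config_exit(dp, tag);
}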
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_POOL_H */
diff --git a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
index 6491606d328b..cd080c8ee667 100644
--- a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
@@ -1,126 +1,135 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2020 by Delphix. All rights reserved.
*/
#ifndef _SYS_FM_FS_ZFS_H
#define _SYS_FM_FS_ZFS_H
#ifdef __cplusplus
extern "C" {
#endif
#define ZFS_ERROR_CLASS "fs.zfs"
#define FM_EREPORT_ZFS_CHECKSUM "checksum"
#define FM_EREPORT_ZFS_AUTHENTICATION "authentication"
#define FM_EREPORT_ZFS_IO "io"
#define FM_EREPORT_ZFS_DATA "data"
#define FM_EREPORT_ZFS_DELAY "delay"
#define FM_EREPORT_ZFS_DEADMAN "deadman"
#define FM_EREPORT_ZFS_POOL "zpool"
#define FM_EREPORT_ZFS_DEVICE_UNKNOWN "vdev.unknown"
#define FM_EREPORT_ZFS_DEVICE_OPEN_FAILED "vdev.open_failed"
#define FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA "vdev.corrupt_data"
#define FM_EREPORT_ZFS_DEVICE_NO_REPLICAS "vdev.no_replicas"
#define FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM "vdev.bad_guid_sum"
#define FM_EREPORT_ZFS_DEVICE_TOO_SMALL "vdev.too_small"
#define FM_EREPORT_ZFS_DEVICE_BAD_LABEL "vdev.bad_label"
#define FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT "vdev.bad_ashift"
#define FM_EREPORT_ZFS_IO_FAILURE "io_failure"
#define FM_EREPORT_ZFS_PROBE_FAILURE "probe_failure"
#define FM_EREPORT_ZFS_LOG_REPLAY "log_replay"
#define FM_EREPORT_ZFS_CONFIG_CACHE_WRITE "config_cache_write"
#define FM_EREPORT_PAYLOAD_ZFS_POOL "pool"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE "pool_failmode"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_GUID "pool_guid"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT "pool_context"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_STATE "pool_state"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID "vdev_guid"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE "vdev_type"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH "vdev_path"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH "vdev_physpath"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH "vdev_enc_sysfs_path"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID "vdev_devid"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU "vdev_fru"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE "vdev_state"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE "vdev_laststate"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT "vdev_ashift"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS "vdev_complete_ts"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS "vdev_delta_ts"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS "vdev_spare_paths"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS "vdev_spare_guids"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS "vdev_read_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS "vdev_write_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS "vdev_cksum_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS "vdev_delays"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID "parent_guid"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE "parent_type"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH "parent_path"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID "parent_devid"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET "zio_objset"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT "zio_object"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL "zio_level"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID "zio_blkid"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR "zio_err"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET "zio_offset"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE "zio_size"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS "zio_flags"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE "zio_stage"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY "zio_priority"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE "zio_pipeline"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY "zio_delay"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP "zio_timestamp"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA "zio_delta"
#define FM_EREPORT_PAYLOAD_ZFS_PREV_STATE "prev_state"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED "cksum_expected"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL "cksum_actual"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO "cksum_algorithm"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP "cksum_byteswap"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES "bad_ranges"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP "bad_ranges_min_gap"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS "bad_range_sets"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS "bad_range_clears"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS "bad_set_bits"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS "bad_cleared_bits"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM "bad_set_histogram"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM "bad_cleared_histogram"
+#define FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME "snapshot_name"
+#define FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME "device_name"
+#define FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME "raw_name"
+#define FM_EREPORT_PAYLOAD_ZFS_VOLUME "volume"
#define FM_EREPORT_FAILMODE_WAIT "wait"
#define FM_EREPORT_FAILMODE_CONTINUE "continue"
#define FM_EREPORT_FAILMODE_PANIC "panic"
#define FM_RESOURCE_REMOVED "removed"
#define FM_RESOURCE_AUTOREPLACE "autoreplace"
#define FM_RESOURCE_STATECHANGE "statechange"
+#define FM_RESOURCE_ZFS_SNAPSHOT_MOUNT "snapshot_mount"
+#define FM_RESOURCE_ZFS_SNAPSHOT_UNMOUNT "snapshot_unmount"
+#define FM_RESOURCE_ZVOL_CREATE_SYMLINK "zvol_create"
+#define FM_RESOURCE_ZVOL_REMOVE_SYMLINK "zvol_remove"
+
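/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): consumers join ZFS_ERROR_CLASS with one of the subclass strings
 * above to form a dotted class name, e.g.
 *
 *	char clazz[64];
 *	(void) snprintf(clazz, sizeof (clazz), "%s.%s",
 *	    ZFS_ERROR_CLASS, FM_EREPORT_ZFS_CHECKSUM);
 *
 * which yields "fs.zfs.checksum".  The buffer and its size are arbitrary.
 */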
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FM_FS_ZFS_H */
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index 532926e12487..f168015abffc 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -1,1230 +1,1232 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
*/
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/bitops.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Forward references that lots of things need.
*/
typedef struct spa spa_t;
typedef struct vdev vdev_t;
typedef struct metaslab metaslab_t;
typedef struct metaslab_group metaslab_group_t;
typedef struct metaslab_class metaslab_class_t;
typedef struct zio zio_t;
typedef struct zilog zilog_t;
typedef struct spa_aux_vdev spa_aux_vdev_t;
typedef struct ddt ddt_t;
typedef struct ddt_entry ddt_entry_t;
typedef struct zbookmark_phys zbookmark_phys_t;
struct bpobj;
struct bplist;
struct dsl_pool;
struct dsl_dataset;
struct dsl_crypto_params;
/*
* We currently support block sizes from 512 bytes to 16MB.
* The benefits of larger blocks, and thus larger IO, need to be weighed
* against the cost of COWing a giant block to modify one byte, and the
* large latency of reading or writing a large block.
*
* Note that although blocks up to 16MB are supported, the recordsize
* property cannot be set larger than zfs_max_recordsize (default 1MB).
* See the comment near zfs_max_recordsize in dsl_dataset.c for details.
*
* Note that although the LSIZE field of the blkptr_t can store sizes up
* to 32MB, the dnode's dn_datablkszsec can only store sizes up to
* 32MB - 512 bytes. Therefore, we limit SPA_MAXBLOCKSIZE to 16MB.
*/
#define SPA_MINBLOCKSHIFT 9
#define SPA_OLD_MAXBLOCKSHIFT 17
#define SPA_MAXBLOCKSHIFT 24
#define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT)
#define SPA_OLD_MAXBLOCKSIZE (1ULL << SPA_OLD_MAXBLOCKSHIFT)
#define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT)
/*
* Alignment Shift (ashift) is an immutable, internal top-level vdev property
* which can only be set at vdev creation time. Physical writes are always done
* according to it, which makes 2^ashift the smallest possible IO on a vdev.
*
* We currently allow values ranging from 512 bytes (2^9 = 512) to 64 KiB
* (2^16 = 65,536).
*/
#define ASHIFT_MIN 9
#define ASHIFT_MAX 16
/*
* Size of block to hold the configuration data (a packed nvlist)
*/
#define SPA_CONFIG_BLOCKSIZE (1ULL << 14)
/*
* The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
* The ASIZE encoding should be at least 64 times larger (6 more bits)
* to support up to 4-way RAID-Z mirror mode with worst-case gang block
* overhead, three DVAs per bp, plus one more bit in case we do anything
* else that expands the ASIZE.
*/
#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
#define SPA_COMPRESSBITS 7
#define SPA_VDEVBITS 24
#define SPA_COMPRESSMASK ((1U << SPA_COMPRESSBITS) - 1)
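/*
 * Editor's worked example (not part of the upstream header): with 16 bits
 * in SPA_MINBLOCKSIZE (512-byte) units, LSIZE and PSIZE encode sizes up to
 * 2^16 * 512 = 32 MiB, matching the comments above.  The 24-bit ASIZE
 * field reaches roughly 2^24 * 512 = 8 GiB, i.e. 256 times the maximum
 * LSIZE, which satisfies the "at least 64 times larger" requirement.
 */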
/*
* All SPA data is represented by 128-bit data virtual addresses (DVAs).
* The members of the dva_t should be considered opaque outside the SPA.
*/
typedef struct dva {
uint64_t dva_word[2];
} dva_t;
/*
* Some checksums/hashes need a 256-bit initialization salt. This salt is kept
* secret and is suitable for use in MAC algorithms as the key.
*/
typedef struct zio_cksum_salt {
uint8_t zcs_bytes[32];
} zio_cksum_salt_t;
/*
* Each block is described by its DVAs, time of birth, checksum, etc.
* The word-by-word, bit-by-bit layout of the blkptr is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | pad | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | pad | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | pad | vdev3 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 |G| offset3 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | checksum[2] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | checksum[3] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* vdev virtual device ID
* offset offset into virtual device
* LSIZE logical size
* PSIZE physical size (after compression)
* ASIZE allocated size (including RAID-Z parity and gang block headers)
* GRID RAID-Z layout information (reserved for future use)
* cksum checksum function
* comp compression function
* G gang block indicator
* B byteorder (endianness)
* D dedup
* X encryption
* E blkptr_t contains embedded data (see below)
* lvl level of indirection
* type DMU object type
* phys birth txg when dva[0] was written; zero if same as logical birth txg
* note that typically all the dva's would be written in this
* txg, but they could be different if they were moved by
* device removal.
* log. birth transaction group in which the block was logically born
* fill count number of non-zero blocks under this bp
* checksum[4] 256-bit checksum of the data this bp describes
*/
/*
* The blkptr_t's of encrypted blocks also need to store the encryption
* parameters so that the block can be decrypted. This layout is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | salt |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 | IV1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | IV2 | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | MAC[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | MAC[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* salt Salt for generating encryption keys
* IV1 First 64 bits of encryption IV
* X Block requires encryption handling (set to 1)
* E blkptr_t contains embedded data (set to 0, see below)
* fill count number of non-zero blocks under this bp (truncated to 32 bits)
* IV2 Last 32 bits of encryption IV
* checksum[2] 128-bit checksum of the data this bp describes
* MAC[2] 128-bit message authentication code for this data
*
* The X bit being set indicates that this block is one of 3 types. If this is
* a level 0 block with an encrypted object type, the block is encrypted
* (see BP_IS_ENCRYPTED()). If this is a level 0 block with an unencrypted
* object type, this block is authenticated with an HMAC (see
* BP_IS_AUTHENTICATED()). Otherwise (if level > 0), this bp will use the MAC
* words to store a checksum-of-MACs from the level below (see
* BP_HAS_INDIRECT_MAC_CKSUM()). For convenience in the code, BP_IS_PROTECTED()
* refers to both encrypted and authenticated blocks and BP_USES_CRYPT()
* refers to any of these 3 kinds of blocks.
*
* The additional encryption parameters are the salt, IV, and MAC which are
* explained in greater detail in the block comment at the top of zio_crypt.c.
* The MAC occupies half of the checksum space since it serves a very similar
* purpose: to prevent data corruption on disk. The only functional difference
* is that the checksum is used to detect on-disk corruption whether or not the
* encryption key is loaded, and the MAC provides additional protection against
* malicious disk tampering. We use the 3rd DVA to store the salt and first
* 64 bits of the IV. As a result encrypted blocks can only have 2 copies
* maximum instead of the normal 3. The last 32 bits of the IV are stored in
* the upper bits of what is usually the fill count. Note that only blocks at
* level 0 or -2 are ever encrypted, which allows us to guarantee that these
* 32 bits are not trampled over by other code (see zio_crypt.c for details).
* The salt and IV are not used for authenticated bps or bps with an indirect
* MAC checksum, so these blocks can utilize all 3 DVAs and the full 64 bits
* for the fill count.
*/
/*
* "Embedded" blkptr_t's don't actually point to a block; instead, they
* have a data payload embedded in the blkptr_t itself. See the comment
* in blkptr.c for more details.
*
* The blkptr_t is laid out as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | payload |
* 1 | payload |
* 2 | payload |
* 3 | payload |
* 4 | payload |
* 5 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | etype |E| comp| PSIZE| LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | payload |
* 8 | payload |
* 9 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | payload |
* c | payload |
* d | payload |
* e | payload |
* f | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* payload contains the embedded data
* B (byteorder) byteorder (endianness)
* D (dedup) padding (set to zero)
* X encryption (set to zero)
* E (embedded) set to one
* lvl indirection level
* type DMU object type
* etype how to interpret embedded data (BP_EMBEDDED_TYPE_*)
* comp compression function of payload
* PSIZE size of payload after compression, in bytes
* LSIZE logical size of payload, in bytes
* note that 25 bits is enough to store the largest
* "normal" BP's LSIZE (2^16 * 2^9) in bytes
* log. birth transaction group in which the block was logically born
*
* Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
* bp's they are stored in units of SPA_MINBLOCKSIZE (512 bytes).
* Generally, the generic BP_GET_*() macros can be used on embedded BP's.
* The B, D, X, lvl, type, and comp fields are stored the same as with normal
* BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must
* be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before
* other macros, as they assert that they are only used on BP's of the correct
* "embedded-ness". Encrypted blkptr_t's cannot be embedded because they use
* the payload space for encryption parameters (see the comment above on
* how encryption parameters are stored).
*/
#define BPE_GET_ETYPE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET((bp)->blk_prop, 40, 8))
#define BPE_SET_ETYPE(bp, t) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, t); \
_NOTE(CONSTCOND) } while (0)
#define BPE_GET_LSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define BPE_SET_LSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)
#define BPE_GET_PSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define BPE_SET_PSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)
typedef enum bp_embedded_type {
BP_EMBEDDED_TYPE_DATA,
BP_EMBEDDED_TYPE_RESERVED, /* Reserved for Delphix byteswap feature. */
BP_EMBEDDED_TYPE_REDACTED,
NUM_BP_EMBEDDED_TYPES
} bp_embedded_type_t;
#define BPE_NUM_WORDS 14
#define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
#define BPE_IS_PAYLOADWORD(bp, wp) \
((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */
#define SPA_SYNC_MIN_VDEVS 3 /* min vdevs to update during sync */
/*
* A block is a hole when it either 1) has never been written to, or
* 2) is zero-filled. In both cases, ZFS can return all zeroes for all reads
* without physically allocating disk space. Holes are represented in the
* blkptr_t structure by zeroed blk_dva. Correct checking for holes is
* done through the BP_IS_HOLE macro. The logical size, level,
* DMU object type, and birth times are also stored for holes that
* were written to at some point (i.e. were punched after having been filled).
*/
typedef struct blkptr {
dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
uint64_t blk_prop; /* size, compression, type, etc */
uint64_t blk_pad[2]; /* Extra space for the future */
uint64_t blk_phys_birth; /* txg when block was allocated */
uint64_t blk_birth; /* transaction group at birth */
uint64_t blk_fill; /* fill count */
zio_cksum_t blk_cksum; /* 256-bit checksum */
} blkptr_t;
/*
* Macros to get and set fields in a bp or DVA.
*/
/*
* Note, for gang blocks, DVA_GET_ASIZE() is the total space allocated for
* this gang DVA including its children BP's. The space allocated at this
* DVA's vdev/offset is vdev_gang_header_asize(vdev).
*/
#define DVA_GET_ASIZE(dva) \
BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x) \
BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)
#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, SPA_VDEVBITS)
#define DVA_SET_VDEV(dva, x) \
BF64_SET((dva)->dva_word[0], 32, SPA_VDEVBITS, x)
#define DVA_GET_OFFSET(dva) \
BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x) \
BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)
#define BP_GET_LSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? \
(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
_NOTE(CONSTCOND) } while (0)
#define BP_GET_PSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
BF64_GET_SB((bp)->blk_prop, 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_PSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
_NOTE(CONSTCOND) } while (0)
#define BP_GET_COMPRESS(bp) \
BF64_GET((bp)->blk_prop, 32, SPA_COMPRESSBITS)
#define BP_SET_COMPRESS(bp, x) \
BF64_SET((bp)->blk_prop, 32, SPA_COMPRESSBITS, x)
#define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)
#define BP_SET_EMBEDDED(bp, x) BF64_SET((bp)->blk_prop, 39, 1, x)
#define BP_GET_CHECKSUM(bp) \
(BP_IS_EMBEDDED(bp) ? ZIO_CHECKSUM_OFF : \
BF64_GET((bp)->blk_prop, 40, 8))
#define BP_SET_CHECKSUM(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, x); \
_NOTE(CONSTCOND) } while (0)
#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)
#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)
/* encrypted, authenticated, and MAC cksum bps use the same bit */
#define BP_USES_CRYPT(bp) BF64_GET((bp)->blk_prop, 61, 1)
#define BP_SET_CRYPT(bp, x) BF64_SET((bp)->blk_prop, 61, 1, x)
#define BP_IS_ENCRYPTED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_IS_AUTHENTICATED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
!DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_HAS_INDIRECT_MAC_CKSUM(bp) \
(BP_USES_CRYPT(bp) && BP_GET_LEVEL(bp) > 0)
#define BP_IS_PROTECTED(bp) \
(BP_IS_ENCRYPTED(bp) || BP_IS_AUTHENTICATED(bp))
#define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1)
#define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x)
#define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1)
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
#define BP_GET_FREE(bp) BF64_GET((bp)->blk_fill, 0, 1)
#define BP_SET_FREE(bp, x) BF64_SET((bp)->blk_fill, 0, 1, x)
#define BP_PHYSICAL_BIRTH(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
#define BP_SET_BIRTH(bp, logical, physical) \
{ \
ASSERT(!BP_IS_EMBEDDED(bp)); \
(bp)->blk_birth = (logical); \
(bp)->blk_phys_birth = ((logical) == (physical) ? 0 : (physical)); \
}
#define BP_GET_FILL(bp) \
((BP_IS_ENCRYPTED(bp)) ? BF64_GET((bp)->blk_fill, 0, 32) : \
((BP_IS_EMBEDDED(bp)) ? 1 : (bp)->blk_fill))
#define BP_SET_FILL(bp, fill) \
{ \
if (BP_IS_ENCRYPTED(bp)) \
BF64_SET((bp)->blk_fill, 0, 32, fill); \
else \
(bp)->blk_fill = fill; \
}
#define BP_GET_IV2(bp) \
(ASSERT(BP_IS_ENCRYPTED(bp)), \
BF64_GET((bp)->blk_fill, 32, 32))
#define BP_SET_IV2(bp, iv2) \
{ \
ASSERT(BP_IS_ENCRYPTED(bp)); \
BF64_SET((bp)->blk_fill, 32, 32, iv2); \
}
#define BP_IS_METADATA(bp) \
(BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
#define BP_GET_ASIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_GET_UCSIZE(bp) \
(BP_IS_METADATA(bp) ? BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
#define BP_GET_NDVAS(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(!!DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_COUNT_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(DVA_GET_GANG(&(bp)->blk_dva[0]) + \
DVA_GET_GANG(&(bp)->blk_dva[1]) + \
(DVA_GET_GANG(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp))))
#define DVA_EQUAL(dva1, dva2) \
((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
(dva1)->dva_word[0] == (dva2)->dva_word[0])
#define BP_EQUAL(bp1, bp2) \
(BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) && \
(bp1)->blk_birth == (bp2)->blk_birth && \
DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) && \
DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) && \
DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))
#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)
#define BP_IDENTITY(bp) (ASSERT(!BP_IS_EMBEDDED(bp)), &(bp)->blk_dva[0])
#define BP_IS_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? B_FALSE : DVA_GET_GANG(BP_IDENTITY(bp)))
#define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \
(dva)->dva_word[1] == 0ULL)
#define BP_IS_HOLE(bp) \
(!BP_IS_EMBEDDED(bp) && DVA_IS_EMPTY(BP_IDENTITY(bp)))
#define BP_SET_REDACTED(bp) \
{ \
BP_SET_EMBEDDED(bp, B_TRUE); \
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_REDACTED); \
}
#define BP_IS_REDACTED(bp) \
(BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_REDACTED)
/* BP_IS_RAIDZ(bp) assumes no block compression */
#define BP_IS_RAIDZ(bp) (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
BP_GET_PSIZE(bp))
#define BP_ZERO(bp) \
{ \
(bp)->blk_dva[0].dva_word[0] = 0; \
(bp)->blk_dva[0].dva_word[1] = 0; \
(bp)->blk_dva[1].dva_word[0] = 0; \
(bp)->blk_dva[1].dva_word[1] = 0; \
(bp)->blk_dva[2].dva_word[0] = 0; \
(bp)->blk_dva[2].dva_word[1] = 0; \
(bp)->blk_prop = 0; \
(bp)->blk_pad[0] = 0; \
(bp)->blk_pad[1] = 0; \
(bp)->blk_phys_birth = 0; \
(bp)->blk_birth = 0; \
(bp)->blk_fill = 0; \
ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}
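/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): exercising the accessors above on a stack blkptr_t.  The sizes
 * and DVA values are arbitrary; sizes must be multiples of SPA_MINBLOCKSIZE.
 */
static inline void
example_blkptr_accessors(void)
{
	blkptr_t bp;

	BP_ZERO(&bp);
	/* With no DVAs and no embedded payload this bp is a hole. */
	ASSERT(BP_IS_HOLE(&bp));

	/* LSIZE/PSIZE are stored biased, in 512-byte units. */
	BP_SET_LSIZE(&bp, 4096);
	BP_SET_PSIZE(&bp, 512);
	ASSERT3U(BP_GET_LSIZE(&bp), ==, 4096);
	ASSERT3U(BP_GET_PSIZE(&bp), ==, 512);

	/* Filling in DVA[0] turns the hole into a real block pointer. */
	DVA_SET_VDEV(&bp.blk_dva[0], 1);
	DVA_SET_OFFSET(&bp.blk_dva[0], 1ULL << 20);
	DVA_SET_ASIZE(&bp.blk_dva[0], 4096);
	ASSERT(!BP_IS_HOLE(&bp));
	ASSERT3U(BP_GET_NDVAS(&bp), ==, 1);
}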
#ifdef _ZFS_BIG_ENDIAN
#define ZFS_HOST_BYTEORDER (0ULL)
#else
#define ZFS_HOST_BYTEORDER (1ULL)
#endif
#define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
#define BP_SPRINTF_LEN 400
/*
* This macro allows code sharing between zfs, libzpool, and mdb.
* 'func' is either snprintf() or mdb_snprintf().
* 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line.
*/
#define SNPRINTF_BLKPTR(func, ws, buf, size, bp, type, checksum, compress) \
{ \
static const char *copyname[] = \
{ "zero", "single", "double", "triple" }; \
int len = 0; \
int copies = 0; \
const char *crypt_type; \
if (bp != NULL) { \
if (BP_IS_ENCRYPTED(bp)) { \
crypt_type = "encrypted"; \
/* LINTED E_SUSPICIOUS_COMPARISON */ \
} else if (BP_IS_AUTHENTICATED(bp)) { \
crypt_type = "authenticated"; \
} else if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) { \
crypt_type = "indirect-MAC"; \
} else { \
crypt_type = "unencrypted"; \
} \
} \
if (bp == NULL) { \
len += func(buf + len, size - len, "<NULL>"); \
} else if (BP_IS_HOLE(bp)) { \
len += func(buf + len, size - len, \
"HOLE [L%llu %s] " \
"size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_EMBEDDED(bp)) { \
len = func(buf + len, size - len, \
"EMBEDDED [L%llu %s] et=%u %s " \
"size=%llxL/%llxP birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(int)BPE_GET_ETYPE(bp), \
compress, \
(u_longlong_t)BPE_GET_LSIZE(bp), \
(u_longlong_t)BPE_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_REDACTED(bp)) { \
len += func(buf + len, size - len, \
"REDACTED [L%llu %s] size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else { \
for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \
const dva_t *dva = &bp->blk_dva[d]; \
if (DVA_IS_VALID(dva)) \
copies++; \
len += func(buf + len, size - len, \
"DVA[%d]=<%llu:%llx:%llx>%c", d, \
(u_longlong_t)DVA_GET_VDEV(dva), \
(u_longlong_t)DVA_GET_OFFSET(dva), \
(u_longlong_t)DVA_GET_ASIZE(dva), \
ws); \
} \
if (BP_IS_ENCRYPTED(bp)) { \
len += func(buf + len, size - len, \
"salt=%llx iv=%llx:%llx%c", \
(u_longlong_t)bp->blk_dva[2].dva_word[0], \
(u_longlong_t)bp->blk_dva[2].dva_word[1], \
(u_longlong_t)BP_GET_IV2(bp), \
ws); \
} \
if (BP_IS_GANG(bp) && \
DVA_GET_ASIZE(&bp->blk_dva[2]) <= \
DVA_GET_ASIZE(&bp->blk_dva[1]) / 2) \
copies--; \
len += func(buf + len, size - len, \
"[L%llu %s] %s %s %s %s %s %s %s%c" \
"size=%llxL/%llxP birth=%lluL/%lluP fill=%llu%c" \
"cksum=%llx:%llx:%llx:%llx", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
checksum, \
compress, \
crypt_type, \
BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE", \
BP_IS_GANG(bp) ? "gang" : "contiguous", \
BP_GET_DEDUP(bp) ? "dedup" : "unique", \
copyname[copies], \
ws, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)BP_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth, \
(u_longlong_t)BP_PHYSICAL_BIRTH(bp), \
(u_longlong_t)BP_GET_FILL(bp), \
ws, \
(u_longlong_t)bp->blk_cksum.zc_word[0], \
(u_longlong_t)bp->blk_cksum.zc_word[1], \
(u_longlong_t)bp->blk_cksum.zc_word[2], \
(u_longlong_t)bp->blk_cksum.zc_word[3]); \
} \
ASSERT(len < size); \
}
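/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): expanding SNPRINTF_BLKPTR with snprintf(), assuming snprintf()
 * is visible in the including translation unit.  The type, checksum, and
 * compression strings are hypothetical placeholders; real callers derive
 * them from the bp's type and the zio tables.
 */
static inline void
example_print_blkptr(char *buf, size_t size, const blkptr_t *bp)
{
	/* Single-line form; pass '\n' instead of ' ' for multi-line output. */
	SNPRINTF_BLKPTR(snprintf, ' ', buf, size, bp,
	    "object", "fletcher4", "lz4");
}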
#define BP_GET_BUFC_TYPE(bp) \
(BP_IS_METADATA(bp) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
typedef enum spa_import_type {
SPA_IMPORT_EXISTING,
SPA_IMPORT_ASSEMBLE
} spa_import_type_t;
typedef enum spa_mode {
SPA_MODE_UNINIT = 0,
SPA_MODE_READ = 1,
SPA_MODE_WRITE = 2,
} spa_mode_t;
/*
* Send TRIM commands in-line during normal pool operation while deleting.
* OFF: no
* ON: yes
* NB: IN_FREEBSD_BASE is defined within the FreeBSD sources.
*/
typedef enum {
SPA_AUTOTRIM_OFF = 0, /* default */
SPA_AUTOTRIM_ON,
#ifdef IN_FREEBSD_BASE
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_ON,
#else
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_OFF,
#endif
} spa_autotrim_t;
/*
* Reason TRIM command was issued, used internally for accounting purposes.
*/
typedef enum trim_type {
TRIM_TYPE_MANUAL = 0,
TRIM_TYPE_AUTO = 1,
TRIM_TYPE_SIMPLE = 2
} trim_type_t;
/* state manipulation functions */
extern int spa_open(const char *pool, spa_t **, void *tag);
extern int spa_open_rewind(const char *pool, spa_t **, void *tag,
nvlist_t *policy, nvlist_t **config);
extern int spa_get_stats(const char *pool, nvlist_t **config, char *altroot,
size_t buflen);
extern int spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, struct dsl_crypto_params *dcp);
extern int spa_import(char *pool, nvlist_t *config, nvlist_t *props,
uint64_t flags);
extern nvlist_t *spa_tryimport(nvlist_t *tryconfig);
extern int spa_destroy(const char *pool);
extern int spa_checkpoint(const char *pool);
extern int spa_checkpoint_discard(const char *pool);
extern int spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce);
extern int spa_reset(const char *pool);
extern void spa_async_request(spa_t *spa, int flag);
extern void spa_async_unrequest(spa_t *spa, int flag);
extern void spa_async_suspend(spa_t *spa);
extern void spa_async_resume(spa_t *spa);
extern int spa_async_tasks(spa_t *spa);
extern spa_t *spa_inject_addref(char *pool);
extern void spa_inject_delref(spa_t *spa);
extern void spa_scan_stat_init(spa_t *spa);
extern int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps);
extern int bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
extern int bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
#define SPA_ASYNC_CONFIG_UPDATE 0x01
#define SPA_ASYNC_REMOVE 0x02
#define SPA_ASYNC_PROBE 0x04
#define SPA_ASYNC_RESILVER_DONE 0x08
#define SPA_ASYNC_RESILVER 0x10
#define SPA_ASYNC_AUTOEXPAND 0x20
#define SPA_ASYNC_REMOVE_DONE 0x40
#define SPA_ASYNC_REMOVE_STOP 0x80
#define SPA_ASYNC_INITIALIZE_RESTART 0x100
#define SPA_ASYNC_TRIM_RESTART 0x200
#define SPA_ASYNC_AUTOTRIM_RESTART 0x400
#define SPA_ASYNC_L2CACHE_REBUILD 0x800
#define SPA_ASYNC_L2CACHE_TRIM 0x1000
#define SPA_ASYNC_REBUILD_DONE 0x2000
/* device manipulation */
extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot,
int replacing, int rebuild);
extern int spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid,
int replace_done);
extern int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare);
extern boolean_t spa_vdev_remove_active(spa_t *spa);
extern int spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist);
extern int spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist);
extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath);
extern int spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru);
extern int spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp);
/* spare state (which is global across all pools) */
extern void spa_spare_add(vdev_t *vd);
extern void spa_spare_remove(vdev_t *vd);
extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt);
extern void spa_spare_activate(vdev_t *vd);
/* L2ARC state (which is global across all pools) */
extern void spa_l2cache_add(vdev_t *vd);
extern void spa_l2cache_remove(vdev_t *vd);
extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool);
extern void spa_l2cache_activate(vdev_t *vd);
extern void spa_l2cache_drop(spa_t *spa);
/* scanning */
extern int spa_scan(spa_t *spa, pool_scan_func_t func);
extern int spa_scan_stop(spa_t *spa);
extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
/* spa syncing */
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
extern void spa_sync_allpools(void);
extern int zfs_sync_pass_deferred_free;
/* spa namespace global mutex */
extern kmutex_t spa_namespace_lock;
/*
* SPA configuration functions in spa_config.c
*/
#define SPA_CONFIG_UPDATE_POOL 0
#define SPA_CONFIG_UPDATE_VDEVS 1
extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t);
extern void spa_config_load(void);
extern nvlist_t *spa_all_configs(uint64_t *);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
int getstats);
extern void spa_config_update(spa_t *spa, int what);
extern int spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv,
vdev_t *parent, uint_t id, int atype);
/*
* Miscellaneous SPA routines in spa_misc.c
*/
/* Namespace manipulation */
extern spa_t *spa_lookup(const char *name);
extern spa_t *spa_add(const char *name, nvlist_t *config, const char *altroot);
extern void spa_remove(spa_t *spa);
extern spa_t *spa_next(spa_t *prev);
/* Refcount functions */
extern void spa_open_ref(spa_t *spa, void *tag);
extern void spa_close(spa_t *spa, void *tag);
extern void spa_async_close(spa_t *spa, void *tag);
extern boolean_t spa_refcount_zero(spa_t *spa);
#define SCL_NONE 0x00
#define SCL_CONFIG 0x01
#define SCL_STATE 0x02
#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */
#define SCL_ALLOC 0x08
#define SCL_ZIO 0x10
#define SCL_FREE 0x20
#define SCL_VDEV 0x40
#define SCL_LOCKS 7
#define SCL_ALL ((1 << SCL_LOCKS) - 1)
#define SCL_STATE_ALL (SCL_STATE | SCL_L2ARC | SCL_ZIO)
/* Historical pool statistics */
typedef struct spa_history_kstat {
kmutex_t lock;
uint64_t count;
uint64_t size;
kstat_t *kstat;
void *priv;
list_t list;
} spa_history_kstat_t;
typedef struct spa_history_list {
uint64_t size;
procfs_list_t procfs_list;
} spa_history_list_t;
typedef struct spa_stats {
spa_history_list_t read_history;
spa_history_list_t txg_history;
spa_history_kstat_t tx_assign_histogram;
spa_history_list_t mmp_history;
spa_history_kstat_t state; /* pool state */
spa_history_kstat_t iostats;
} spa_stats_t;
typedef enum txg_state {
TXG_STATE_BIRTH = 0,
TXG_STATE_OPEN = 1,
TXG_STATE_QUIESCED = 2,
TXG_STATE_WAIT_FOR_SYNC = 3,
TXG_STATE_SYNCED = 4,
TXG_STATE_COMMITTED = 5,
} txg_state_t;
typedef struct txg_stat {
vdev_stat_t vs1;
vdev_stat_t vs2;
uint64_t txg;
uint64_t ndirty;
} txg_stat_t;
/* Assorted pool IO kstats */
typedef struct spa_iostats {
kstat_named_t trim_extents_written;
kstat_named_t trim_bytes_written;
kstat_named_t trim_extents_skipped;
kstat_named_t trim_bytes_skipped;
kstat_named_t trim_extents_failed;
kstat_named_t trim_bytes_failed;
kstat_named_t autotrim_extents_written;
kstat_named_t autotrim_bytes_written;
kstat_named_t autotrim_extents_skipped;
kstat_named_t autotrim_bytes_skipped;
kstat_named_t autotrim_extents_failed;
kstat_named_t autotrim_bytes_failed;
kstat_named_t simple_trim_extents_written;
kstat_named_t simple_trim_bytes_written;
kstat_named_t simple_trim_extents_skipped;
kstat_named_t simple_trim_bytes_skipped;
kstat_named_t simple_trim_extents_failed;
kstat_named_t simple_trim_bytes_failed;
} spa_iostats_t;
extern void spa_stats_init(spa_t *spa);
extern void spa_stats_destroy(spa_t *spa);
extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
uint32_t aflags);
extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
txg_state_t completed_state, hrtime_t completed_time);
extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
struct dsl_pool *);
extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
extern int spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id);
extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
hrtime_t duration);
extern void spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp,
uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id,
int error);
extern void spa_iostats_trim_add(spa_t *spa, trim_type_t type,
uint64_t extents_written, uint64_t bytes_written,
uint64_t extents_skipped, uint64_t bytes_skipped,
uint64_t extents_failed, uint64_t bytes_failed);
extern void spa_import_progress_add(spa_t *spa);
extern void spa_import_progress_remove(uint64_t spa_guid);
extern int spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining);
extern int spa_import_progress_set_max_txg(uint64_t pool_guid,
uint64_t max_txg);
extern int spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t spa_load_state);
/* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
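/*
 * Illustrative sketch only (editor's example, not part of the upstream
 * header): the usual reader-side pattern for the SCL_* configuration locks
 * declared above.  "example_config_query" is a hypothetical caller;
 * RW_READER is the standard krw_t reader constant.
 */
static inline void
example_config_query(spa_t *spa, const void *tag)
{
	spa_config_enter(spa, SCL_CONFIG, tag, RW_READER);
	ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
	/* ... vdev tree / pool property reads go here ... */
	spa_config_exit(spa, SCL_CONFIG, tag);
}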
/* Pool vdev add/remove lock */
extern uint64_t spa_vdev_enter(spa_t *spa);
extern uint64_t spa_vdev_detach_enter(spa_t *spa, uint64_t guid);
extern uint64_t spa_vdev_config_enter(spa_t *spa);
extern void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg,
int error, char *tag);
extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error);
/* Pool vdev state change lock */
extern void spa_vdev_state_enter(spa_t *spa, int oplock);
extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error);
/* Log state */
typedef enum spa_log_state {
SPA_LOG_UNKNOWN = 0, /* unknown log state */
SPA_LOG_MISSING, /* missing log(s) */
SPA_LOG_CLEAR, /* clear the log(s) */
SPA_LOG_GOOD, /* log(s) are good */
} spa_log_state_t;
extern spa_log_state_t spa_get_log_state(spa_t *spa);
extern void spa_set_log_state(spa_t *spa, spa_log_state_t state);
extern int spa_reset_logs(spa_t *spa);
/* Log claim callback */
extern void spa_claim_notify(zio_t *zio);
extern void spa_deadman(void *);
/* Accessor functions */
extern boolean_t spa_shutting_down(spa_t *spa);
extern struct dsl_pool *spa_get_dsl(spa_t *spa);
extern boolean_t spa_is_initializing(spa_t *spa);
extern boolean_t spa_indirect_vdevs_loaded(spa_t *spa);
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
extern void spa_altroot(spa_t *, char *, size_t);
extern int spa_sync_pass(spa_t *spa);
extern char *spa_name(spa_t *spa);
extern uint64_t spa_guid(spa_t *spa);
extern uint64_t spa_load_guid(spa_t *spa);
extern uint64_t spa_last_synced_txg(spa_t *spa);
extern uint64_t spa_first_txg(spa_t *spa);
extern uint64_t spa_syncing_txg(spa_t *spa);
extern uint64_t spa_final_dirty_txg(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_checkpoint_space(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
extern void spa_update_dspace(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern boolean_t spa_deflate(spa_t *spa);
extern metaslab_class_t *spa_normal_class(spa_t *spa);
extern metaslab_class_t *spa_log_class(spa_t *spa);
extern metaslab_class_t *spa_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_special_class(spa_t *spa);
extern metaslab_class_t *spa_dedup_class(spa_t *spa);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, uint64_t size,
dmu_object_type_t objtype, uint_t level, uint_t special_smallblk);
extern void spa_evicting_os_register(spa_t *, objset_t *os);
extern void spa_evicting_os_deregister(spa_t *, objset_t *os);
extern void spa_evicting_os_wait(spa_t *spa);
extern int spa_max_replication(spa_t *spa);
extern int spa_prev_software_version(spa_t *spa);
extern uint64_t spa_get_failmode(spa_t *spa);
extern uint64_t spa_get_deadman_failmode(spa_t *spa);
extern void spa_set_deadman_failmode(spa_t *spa, const char *failmode);
extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
extern space_map_t *spa_syncing_log_sm(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
extern uint64_t spa_deadman_ziotime(spa_t *spa);
extern uint64_t spa_dirty_data(spa_t *spa);
extern spa_autotrim_t spa_get_autotrim(spa_t *spa);
/* Miscellaneous support routines */
extern void spa_load_failed(spa_t *spa, const char *fmt, ...);
extern void spa_load_note(spa_t *spa, const char *fmt, ...);
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
extern char *spa_strdup(const char *);
extern void spa_strfree(char *);
extern uint64_t spa_generate_guid(spa_t *spa);
extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
extern void spa_freeze(spa_t *spa);
extern int spa_change_guid(spa_t *spa);
extern void spa_upgrade(spa_t *spa, uint64_t version);
extern void spa_evict_all(void);
extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
boolean_t l2cache);
extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
extern uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp);
extern boolean_t spa_has_slogs(spa_t *spa);
extern boolean_t spa_is_root(spa_t *spa);
extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern int spa_maxdnodesize(spa_t *spa);
extern boolean_t spa_has_checkpoint(spa_t *spa);
extern boolean_t spa_importing_readonly_checkpoint(spa_t *spa);
extern boolean_t spa_suspend_async_destroy(spa_t *spa);
extern uint64_t spa_min_claim_txg(spa_t *spa);
extern boolean_t zfs_dva_valid(spa_t *spa, const dva_t *dva,
const blkptr_t *bp);
typedef void (*spa_remap_cb_t)(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg);
extern boolean_t spa_remap_blkptr(spa_t *spa, blkptr_t *bp,
spa_remap_cb_t callback, void *arg);
extern uint64_t spa_get_last_removal_txg(spa_t *spa);
extern boolean_t spa_trust_config(spa_t *spa);
extern uint64_t spa_missing_tvds_allowed(spa_t *spa);
extern void spa_set_missing_tvds(spa_t *spa, uint64_t missing);
extern boolean_t spa_top_vdevs_spacemap_addressable(spa_t *spa);
extern uint64_t spa_total_metaslabs(spa_t *spa);
extern boolean_t spa_multihost(spa_t *spa);
extern uint32_t spa_get_hostid(spa_t *spa);
extern void spa_activate_allocation_classes(spa_t *, dmu_tx_t *);
extern boolean_t spa_livelist_delete_check(spa_t *spa);
extern spa_mode_t spa_mode(spa_t *spa);
extern uint64_t zfs_strtonum(const char *str, char **nptr);
extern char *spa_his_ievent_table[];
extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx);
extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
char *his_buf);
extern int spa_history_log(spa_t *spa, const char *his_buf);
extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
extern void spa_history_log_version(spa_t *spa, const char *operation,
dmu_tx_t *tx);
extern void spa_history_log_internal(spa_t *spa, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern const char *spa_state_to_name(spa_t *spa);
/* error handling */
struct zbookmark_phys;
extern void spa_log_error(spa_t *spa, const zbookmark_phys_t *zb);
extern int zfs_ereport_post(const char *clazz, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state);
extern boolean_t zfs_ereport_is_valid(const char *clazz, spa_t *spa, vdev_t *vd,
zio_t *zio);
extern void zfs_ereport_taskq_fini(void);
extern void zfs_ereport_clear(spa_t *spa, vdev_t *vd);
extern nvlist_t *zfs_event_create(spa_t *spa, vdev_t *vd, const char *type,
const char *name, nvlist_t *aux);
extern void zfs_post_remove(spa_t *spa, vdev_t *vd);
extern void zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate);
extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd);
extern uint64_t spa_get_errlog_size(spa_t *spa);
extern int spa_get_errlog(spa_t *spa, void *uaddr, size_t *count);
extern void spa_errlog_rotate(spa_t *spa);
extern void spa_errlog_drain(spa_t *spa);
extern void spa_errlog_sync(spa_t *spa, uint64_t txg);
extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
/* vdev cache */
extern void vdev_cache_stat_init(void);
extern void vdev_cache_stat_fini(void);
/* vdev mirror */
extern void vdev_mirror_stat_init(void);
extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(spa_mode_t mode);
extern void spa_fini(void);
extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
/* asynchronous event notification */
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, nvlist_t *hist_nvl,
const char *name);
+extern void zfs_ereport_zvol_post(const char *subclass, const char *name,
+ const char *device_name, const char *raw_name);
/* waiting for pool activities to complete */
extern int spa_wait(const char *pool, zpool_wait_activity_t activity,
boolean_t *waited);
extern int spa_wait_tag(const char *name, zpool_wait_activity_t activity,
uint64_t tag, boolean_t *waited);
extern void spa_notify_waiters(spa_t *spa);
extern void spa_wake_waiters(spa_t *spa);
/* module param call functions */
int param_set_deadman_ziotime(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_synctime(ZFS_MODULE_PARAM_ARGS);
int param_set_slop_shift(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_failmode(ZFS_MODULE_PARAM_ARGS);
#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
_NOTE(CONSTCOND) } while (0)
#else
#define dprintf_bp(bp, fmt, ...)
#endif
extern spa_mode_t spa_mode_global;
extern int zfs_deadman_enabled;
extern unsigned long zfs_deadman_synctime_ms;
extern unsigned long zfs_deadman_ziotime_ms;
extern unsigned long zfs_deadman_checktime_ms;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_draid.h b/sys/contrib/openzfs/include/sys/vdev_draid.h
index 52ce4ba16105..dd334acbacf1 100644
--- a/sys/contrib/openzfs/include/sys/vdev_draid.h
+++ b/sys/contrib/openzfs/include/sys/vdev_draid.h
@@ -1,110 +1,111 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
*/
#ifndef _SYS_VDEV_DRAID_H
#define _SYS_VDEV_DRAID_H
#include <sys/types.h>
#include <sys/abd.h>
#include <sys/nvpair.h>
#include <sys/zio.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Constants required to generate and use dRAID permutations.
*/
#define VDEV_DRAID_SEED 0xd7a1d5eed
#define VDEV_DRAID_MAX_MAPS 254
#define VDEV_DRAID_ROWSHIFT SPA_MAXBLOCKSHIFT
#define VDEV_DRAID_ROWHEIGHT (1ULL << VDEV_DRAID_ROWSHIFT)
#define VDEV_DRAID_REFLOW_RESERVE (2 * VDEV_DRAID_ROWHEIGHT)
/*
* dRAID permutation map.
*/
typedef struct draid_map {
uint64_t dm_children; /* # of permutation columns */
uint64_t dm_nperms; /* # of permutation rows */
uint64_t dm_seed; /* dRAID map seed */
uint64_t dm_checksum; /* Checksum of generated map */
uint8_t *dm_perms; /* base permutation array */
} draid_map_t;
/*
* dRAID configuration.
*/
typedef struct vdev_draid_config {
/*
* Values read from the dRAID nvlist configuration.
*/
uint64_t vdc_ndata; /* # of data devices in group */
uint64_t vdc_nparity; /* # of parity devices in group */
uint64_t vdc_nspares; /* # of distributed spares */
uint64_t vdc_children; /* # of children */
uint64_t vdc_ngroups; /* # groups per slice */
/*
* Immutable derived constants.
*/
uint8_t *vdc_perms; /* permutation array */
uint64_t vdc_nperms; /* # of permutations */
uint64_t vdc_groupwidth; /* = data + parity */
uint64_t vdc_ndisks; /* = children - spares */
uint64_t vdc_groupsz; /* = groupwidth * DRAID_ROWSIZE */
uint64_t vdc_devslicesz; /* = (groupsz * groups) / ndisks */
} vdev_draid_config_t;
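/*
 * Illustrative only (not part of this header): a minimal sketch of how the
 * derived constants follow from the nvlist values, per the field comments
 * above.  The 'rowsize' argument stands in for the per-row allocation size
 * (DRAID_ROWSIZE) referenced above; the function name is hypothetical.
 */
static inline void
vdev_draid_config_derive_example(vdev_draid_config_t *vdc, uint64_t rowsize)
{
	vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
	vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
	vdc->vdc_groupsz = vdc->vdc_groupwidth * rowsize;
	vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
	    vdc->vdc_ndisks;
}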
/*
* Functions for handling dRAID permutation maps.
*/
extern uint64_t vdev_draid_rand(uint64_t *);
extern int vdev_draid_lookup_map(uint64_t, const draid_map_t **);
extern int vdev_draid_generate_perms(const draid_map_t *, uint8_t **);
/*
* General dRAID support functions.
*/
extern boolean_t vdev_draid_readable(vdev_t *, uint64_t);
extern boolean_t vdev_draid_missing(vdev_t *, uint64_t, uint64_t, uint64_t);
extern uint64_t vdev_draid_asize_to_psize(vdev_t *, uint64_t);
extern void vdev_draid_map_alloc_empty(zio_t *, struct raidz_row *);
+extern int vdev_draid_map_verify_empty(zio_t *, struct raidz_row *);
extern nvlist_t *vdev_draid_read_config_spare(vdev_t *);
/* Functions for dRAID distributed spares. */
extern vdev_t *vdev_draid_spare_get_child(vdev_t *, uint64_t);
extern vdev_t *vdev_draid_spare_get_parent(vdev_t *);
extern int vdev_draid_spare_create(nvlist_t *, vdev_t *, uint64_t *, uint64_t);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_DRAID_H */
diff --git a/sys/contrib/openzfs/include/sys/vdev_raidz.h b/sys/contrib/openzfs/include/sys/vdev_raidz.h
index ee597eb0dbb3..c7cf0af6d945 100644
--- a/sys/contrib/openzfs/include/sys/vdev_raidz.h
+++ b/sys/contrib/openzfs/include/sys/vdev_raidz.h
@@ -1,75 +1,77 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Neskovic <neskovic@compeng.uni-frankfurt.de>.
*/
#ifndef _SYS_VDEV_RAIDZ_H
#define _SYS_VDEV_RAIDZ_H
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct zio;
+struct raidz_col;
struct raidz_row;
struct raidz_map;
#if !defined(_KERNEL)
struct kernel_param {};
#endif
/*
* vdev_raidz interface
*/
struct raidz_map *vdev_raidz_map_alloc(struct zio *, uint64_t, uint64_t,
uint64_t);
void vdev_raidz_map_free(struct raidz_map *);
void vdev_raidz_generate_parity_row(struct raidz_map *, struct raidz_row *);
void vdev_raidz_generate_parity(struct raidz_map *);
void vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
void vdev_raidz_child_done(zio_t *);
void vdev_raidz_io_done(zio_t *);
+void vdev_raidz_checksum_error(zio_t *, struct raidz_col *, abd_t *);
extern const zio_vsd_ops_t vdev_raidz_vsd_ops;
/*
* vdev_raidz_math interface
*/
void vdev_raidz_math_init(void);
void vdev_raidz_math_fini(void);
const struct raidz_impl_ops *vdev_raidz_math_get_ops(void);
int vdev_raidz_math_generate(struct raidz_map *, struct raidz_row *);
int vdev_raidz_math_reconstruct(struct raidz_map *, struct raidz_row *,
const int *, const int *, const int);
int vdev_raidz_impl_set(const char *);
typedef struct vdev_raidz {
int vd_logical_width;
int vd_nparity;
} vdev_raidz_t;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_RAIDZ_H */
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index 97bd1a26ad40..e46455ea990b 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -1,707 +1,709 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019-2020, Michael Niewöhner
*/
#ifndef _ZIO_H
#define _ZIO_H
#include <sys/zio_priority.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/fs/zfs.h>
#include <sys/zio_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Embedded checksum
*/
#define ZEC_MAGIC 0x210da7ab10c7a11ULL
typedef struct zio_eck {
uint64_t zec_magic; /* for validation, endianness */
zio_cksum_t zec_cksum; /* 256-bit checksum */
} zio_eck_t;
/*
* Gang block headers are self-checksumming and contain an array
* of block pointers.
*/
#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t) - \
(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
sizeof (uint64_t))
typedef struct zio_gbh {
blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
uint64_t zg_filler[SPA_GBH_FILLER];
zio_eck_t zg_tail;
} zio_gbh_phys_t;
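/*
 * Worked sizing check (assuming the usual 512-byte SPA_MINBLOCKSIZE and
 * 128-byte blkptr_t): zio_eck_t is 40 bytes (8-byte magic plus a 256-bit
 * checksum), so SPA_GBH_NBLKPTRS = (512 - 40) / 128 = 3 and
 * SPA_GBH_FILLER = (512 - 40 - 3 * 128) / 8 = 11; the gang header then
 * fills one minimum-size block exactly: 3 * 128 + 11 * 8 + 40 = 512.
 */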
enum zio_checksum {
ZIO_CHECKSUM_INHERIT = 0,
ZIO_CHECKSUM_ON,
ZIO_CHECKSUM_OFF,
ZIO_CHECKSUM_LABEL,
ZIO_CHECKSUM_GANG_HEADER,
ZIO_CHECKSUM_ZILOG,
ZIO_CHECKSUM_FLETCHER_2,
ZIO_CHECKSUM_FLETCHER_4,
ZIO_CHECKSUM_SHA256,
ZIO_CHECKSUM_ZILOG2,
ZIO_CHECKSUM_NOPARITY,
ZIO_CHECKSUM_SHA512,
ZIO_CHECKSUM_SKEIN,
#if !defined(__FreeBSD__)
ZIO_CHECKSUM_EDONR,
#endif
ZIO_CHECKSUM_FUNCTIONS
};
/*
* The number of "legacy" checksum functions which can be set on individual
* objects.
*/
#define ZIO_CHECKSUM_LEGACY_FUNCTIONS ZIO_CHECKSUM_ZILOG2
#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4
#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON
#define ZIO_CHECKSUM_MASK 0xffULL
#define ZIO_CHECKSUM_VERIFY (1 << 8)
#define ZIO_DEDUPCHECKSUM ZIO_CHECKSUM_SHA256
/* supported encryption algorithms */
enum zio_encrypt {
ZIO_CRYPT_INHERIT = 0,
ZIO_CRYPT_ON,
ZIO_CRYPT_OFF,
ZIO_CRYPT_AES_128_CCM,
ZIO_CRYPT_AES_192_CCM,
ZIO_CRYPT_AES_256_CCM,
ZIO_CRYPT_AES_128_GCM,
ZIO_CRYPT_AES_192_GCM,
ZIO_CRYPT_AES_256_GCM,
ZIO_CRYPT_FUNCTIONS
};
#define ZIO_CRYPT_ON_VALUE ZIO_CRYPT_AES_256_GCM
#define ZIO_CRYPT_DEFAULT ZIO_CRYPT_OFF
/* macros defining encryption lengths */
#define ZIO_OBJSET_MAC_LEN 32
#define ZIO_DATA_IV_LEN 12
#define ZIO_DATA_SALT_LEN 8
#define ZIO_DATA_MAC_LEN 16
/*
* The number of "legacy" compression functions which can be set on individual
* objects.
*/
#define ZIO_COMPRESS_LEGACY_FUNCTIONS ZIO_COMPRESS_LZ4
/*
* The meaning of "compress = on" is selected by the compression features enabled
* on a given pool.
*/
#define ZIO_COMPRESS_LEGACY_ON_VALUE ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_LZ4_ON_VALUE ZIO_COMPRESS_LZ4
#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF
#define BOOTFS_COMPRESS_VALID(compress) \
((compress) == ZIO_COMPRESS_LZJB || \
(compress) == ZIO_COMPRESS_LZ4 || \
(compress) == ZIO_COMPRESS_GZIP_1 || \
(compress) == ZIO_COMPRESS_GZIP_2 || \
(compress) == ZIO_COMPRESS_GZIP_3 || \
(compress) == ZIO_COMPRESS_GZIP_4 || \
(compress) == ZIO_COMPRESS_GZIP_5 || \
(compress) == ZIO_COMPRESS_GZIP_6 || \
(compress) == ZIO_COMPRESS_GZIP_7 || \
(compress) == ZIO_COMPRESS_GZIP_8 || \
(compress) == ZIO_COMPRESS_GZIP_9 || \
(compress) == ZIO_COMPRESS_ZLE || \
(compress) == ZIO_COMPRESS_ZSTD || \
(compress) == ZIO_COMPRESS_ON || \
(compress) == ZIO_COMPRESS_OFF)
#define ZIO_COMPRESS_ALGO(x) (x & SPA_COMPRESSMASK)
#define ZIO_COMPRESS_LEVEL(x) ((x & ~SPA_COMPRESSMASK) >> SPA_COMPRESSBITS)
#define ZIO_COMPRESS_RAW(type, level) (type | ((level) << SPA_COMPRESSBITS))
#define ZIO_COMPLEVEL_ZSTD(level) \
ZIO_COMPRESS_RAW(ZIO_COMPRESS_ZSTD, level)
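/*
 * Illustrative only (not part of this header): a zstd level is packed next
 * to the algorithm and can be recovered again, assuming the
 * SPA_COMPRESSBITS/SPA_COMPRESSMASK definitions from spa.h; the function
 * name is hypothetical.
 */
static inline void
zio_complevel_example(void)
{
	uint64_t packed = ZIO_COMPLEVEL_ZSTD(19);
	ASSERT3U(ZIO_COMPRESS_ALGO(packed), ==, ZIO_COMPRESS_ZSTD);
	ASSERT3U(ZIO_COMPRESS_LEVEL(packed), ==, 19);
}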
#define ZIO_FAILURE_MODE_WAIT 0
#define ZIO_FAILURE_MODE_CONTINUE 1
#define ZIO_FAILURE_MODE_PANIC 2
typedef enum zio_suspend_reason {
ZIO_SUSPEND_NONE = 0,
ZIO_SUSPEND_IOERR,
ZIO_SUSPEND_MMP,
} zio_suspend_reason_t;
enum zio_flag {
/*
* Flags inherited by gang, ddt, and vdev children,
* and that must be equal for two zios to aggregate
*/
ZIO_FLAG_DONT_AGGREGATE = 1 << 0,
ZIO_FLAG_IO_REPAIR = 1 << 1,
ZIO_FLAG_SELF_HEAL = 1 << 2,
ZIO_FLAG_RESILVER = 1 << 3,
ZIO_FLAG_SCRUB = 1 << 4,
ZIO_FLAG_SCAN_THREAD = 1 << 5,
ZIO_FLAG_PHYSICAL = 1 << 6,
#define ZIO_FLAG_AGG_INHERIT (ZIO_FLAG_CANFAIL - 1)
/*
* Flags inherited by ddt, gang, and vdev children.
*/
ZIO_FLAG_CANFAIL = 1 << 7, /* must be first for INHERIT */
ZIO_FLAG_SPECULATIVE = 1 << 8,
ZIO_FLAG_CONFIG_WRITER = 1 << 9,
ZIO_FLAG_DONT_RETRY = 1 << 10,
ZIO_FLAG_DONT_CACHE = 1 << 11,
ZIO_FLAG_NODATA = 1 << 12,
ZIO_FLAG_INDUCE_DAMAGE = 1 << 13,
ZIO_FLAG_IO_ALLOCATING = 1 << 14,
#define ZIO_FLAG_DDT_INHERIT (ZIO_FLAG_IO_RETRY - 1)
#define ZIO_FLAG_GANG_INHERIT (ZIO_FLAG_IO_RETRY - 1)
/*
* Flags inherited by vdev children.
*/
ZIO_FLAG_IO_RETRY = 1 << 15, /* must be first for INHERIT */
ZIO_FLAG_PROBE = 1 << 16,
ZIO_FLAG_TRYHARD = 1 << 17,
ZIO_FLAG_OPTIONAL = 1 << 18,
#define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)
/*
* Flags not inherited by any children.
*/
ZIO_FLAG_DONT_QUEUE = 1 << 19, /* must be first for INHERIT */
ZIO_FLAG_DONT_PROPAGATE = 1 << 20,
ZIO_FLAG_IO_BYPASS = 1 << 21,
ZIO_FLAG_IO_REWRITE = 1 << 22,
ZIO_FLAG_RAW_COMPRESS = 1 << 23,
ZIO_FLAG_RAW_ENCRYPT = 1 << 24,
ZIO_FLAG_GANG_CHILD = 1 << 25,
ZIO_FLAG_DDT_CHILD = 1 << 26,
ZIO_FLAG_GODFATHER = 1 << 27,
ZIO_FLAG_NOPWRITE = 1 << 28,
ZIO_FLAG_REEXECUTED = 1 << 29,
ZIO_FLAG_DELEGATED = 1 << 30,
ZIO_FLAG_FASTWRITE = 1 << 31,
};
#define ZIO_FLAG_MUSTSUCCEED 0
#define ZIO_FLAG_RAW (ZIO_FLAG_RAW_COMPRESS | ZIO_FLAG_RAW_ENCRYPT)
#define ZIO_DDT_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) | \
ZIO_FLAG_DDT_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_GANG_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \
ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_VDEV_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_CANFAIL)
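/*
 * Note on the inheritance masks above: each boundary flag is a single bit,
 * so "boundary - 1" is a mask of every flag declared before it.  For
 * example, ZIO_FLAG_CANFAIL is 1 << 7, making ZIO_FLAG_AGG_INHERIT 0x7f
 * (ZIO_FLAG_DONT_AGGREGATE through ZIO_FLAG_PHYSICAL), and
 * ZIO_FLAG_DONT_QUEUE is 1 << 19, making ZIO_FLAG_VDEV_INHERIT cover
 * everything up through ZIO_FLAG_OPTIONAL.
 */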
#define ZIO_CHILD_BIT(x) (1 << (x))
#define ZIO_CHILD_BIT_IS_SET(val, x) ((val) & (1 << (x)))
enum zio_child {
ZIO_CHILD_VDEV = 0,
ZIO_CHILD_GANG,
ZIO_CHILD_DDT,
ZIO_CHILD_LOGICAL,
ZIO_CHILD_TYPES
};
#define ZIO_CHILD_VDEV_BIT ZIO_CHILD_BIT(ZIO_CHILD_VDEV)
#define ZIO_CHILD_GANG_BIT ZIO_CHILD_BIT(ZIO_CHILD_GANG)
#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
#define ZIO_CHILD_ALL_BITS \
(ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
enum zio_wait_type {
ZIO_WAIT_READY = 0,
ZIO_WAIT_DONE,
ZIO_WAIT_TYPES
};
typedef void zio_done_func_t(zio_t *zio);
extern int zio_exclude_metadata;
extern int zio_dva_throttle_enabled;
extern const char *zio_type_name[ZIO_TYPES];
/*
* A bookmark is a four-tuple <objset, object, level, blkid> that uniquely
* identifies any block in the pool. By convention, the meta-objset (MOS)
* is objset 0, and the meta-dnode is object 0. This covers all blocks
* except root blocks and ZIL blocks, which are defined as follows:
*
* Root blocks (objset_phys_t) are object 0, level -1: <objset, 0, -1, 0>.
* ZIL blocks are bookmarked <objset, 0, -2, blkid == ZIL sequence number>.
* dmu_sync()ed ZIL data blocks are bookmarked <objset, object, -2, blkid>.
* dnode visit bookmarks are <objset, object id of dnode, -3, 0>.
*
* Note: this structure is called a bookmark because its original purpose
* was to remember where to resume a pool-wide traverse.
*
* Note: this structure is passed between userland and the kernel, and is
* stored on disk (by virtue of being incorporated into other on-disk
* structures, e.g. dsl_scan_phys_t).
*/
struct zbookmark_phys {
uint64_t zb_objset;
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
};
#define SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
(zb)->zb_objset = objset; \
(zb)->zb_object = object; \
(zb)->zb_level = level; \
(zb)->zb_blkid = blkid; \
}
#define ZB_DESTROYED_OBJSET (-1ULL)
#define ZB_ROOT_OBJECT (0ULL)
#define ZB_ROOT_LEVEL (-1LL)
#define ZB_ROOT_BLKID (0ULL)
#define ZB_ZIL_OBJECT (0ULL)
#define ZB_ZIL_LEVEL (-2LL)
#define ZB_DNODE_LEVEL (-3LL)
#define ZB_DNODE_BLKID (0ULL)
#define ZB_IS_ZERO(zb) \
((zb)->zb_objset == 0 && (zb)->zb_object == 0 && \
(zb)->zb_level == 0 && (zb)->zb_blkid == 0)
#define ZB_IS_ROOT(zb) \
((zb)->zb_object == ZB_ROOT_OBJECT && \
(zb)->zb_level == ZB_ROOT_LEVEL && \
(zb)->zb_blkid == ZB_ROOT_BLKID)
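/*
 * Illustrative only (not part of this header): building the two special
 * bookmarks described above for an objset number 'os' and a ZIL sequence
 * number 'seq'; the function name is hypothetical.
 */
static inline void
zbookmark_example(uint64_t os, uint64_t seq,
    zbookmark_phys_t *zb_root, zbookmark_phys_t *zb_zil)
{
	/* Root block of the objset: <objset, 0, -1, 0>. */
	SET_BOOKMARK(zb_root, os, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	/* ZIL block: <objset, 0, -2, ZIL sequence number>. */
	SET_BOOKMARK(zb_zil, os, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, seq);
}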
typedef struct zio_prop {
enum zio_checksum zp_checksum;
enum zio_compress zp_compress;
uint8_t zp_complevel;
dmu_object_type_t zp_type;
uint8_t zp_level;
uint8_t zp_copies;
boolean_t zp_dedup;
boolean_t zp_dedup_verify;
boolean_t zp_nopwrite;
boolean_t zp_encrypt;
boolean_t zp_byteorder;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
uint32_t zp_zpl_smallblk;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);
struct zio_bad_cksum; /* defined in zio_checksum.h */
struct dnode_phys;
struct abd;
struct zio_cksum_report {
struct zio_cksum_report *zcr_next;
nvlist_t *zcr_ereport;
nvlist_t *zcr_detector;
void *zcr_cbdata;
size_t zcr_cbinfo; /* passed to zcr_free() */
uint64_t zcr_sector;
uint64_t zcr_align;
uint64_t zcr_length;
zio_cksum_finish_f *zcr_finish;
zio_cksum_free_f *zcr_free;
/* internal use only */
struct zio_bad_cksum *zcr_ckinfo; /* information from failure */
};
typedef struct zio_vsd_ops {
zio_done_func_t *vsd_free;
} zio_vsd_ops_t;
typedef struct zio_gang_node {
zio_gbh_phys_t *gn_gbh;
struct zio_gang_node *gn_child[SPA_GBH_NBLKPTRS];
} zio_gang_node_t;
typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
zio_gang_node_t *gn, struct abd *data, uint64_t offset);
typedef void zio_transform_func_t(zio_t *zio, struct abd *data, uint64_t size);
typedef struct zio_transform {
struct abd *zt_orig_abd;
uint64_t zt_orig_size;
uint64_t zt_bufsize;
zio_transform_func_t *zt_transform;
struct zio_transform *zt_next;
} zio_transform_t;
typedef zio_t *zio_pipe_stage_t(zio_t *zio);
/*
* The io_reexecute flags are distinct from io_flags because the child must
* be able to propagate them to the parent. The normal io_flags are local
* to the zio, not protected by any lock, and not modifiable by children;
* the reexecute flags are protected by io_lock, modifiable by children,
* and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set.
*/
#define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02
/*
* The io_trim flags are used to specify the type of TRIM to perform. They
* only apply to ZIO_TYPE_TRIM zios and are distinct from io_flags.
*/
enum trim_flag {
ZIO_TRIM_SECURE = 1 << 0,
};
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link {
zio_t *zl_parent;
zio_t *zl_child;
list_node_t zl_parent_node;
list_node_t zl_child_node;
} zio_link_t;
struct zio {
/* Core information about this I/O */
zbookmark_phys_t io_bookmark;
zio_prop_t io_prop;
zio_type_t io_type;
enum zio_child io_child_type;
enum trim_flag io_trim_flags;
int io_cmd;
zio_priority_t io_priority;
uint8_t io_reexecute;
uint8_t io_state[ZIO_WAIT_TYPES];
uint64_t io_txg;
spa_t *io_spa;
blkptr_t *io_bp;
blkptr_t *io_bp_override;
blkptr_t io_bp_copy;
list_t io_parent_list;
list_t io_child_list;
zio_t *io_logical;
zio_transform_t *io_transform_stack;
/* Callback info */
zio_done_func_t *io_ready;
zio_done_func_t *io_children_ready;
zio_done_func_t *io_physdone;
zio_done_func_t *io_done;
void *io_private;
int64_t io_prev_space_delta; /* DMU private */
blkptr_t io_bp_orig;
/* io_lsize != io_orig_size iff this is a raw write */
uint64_t io_lsize;
/* Data represented by this I/O */
struct abd *io_abd;
struct abd *io_orig_abd;
uint64_t io_size;
uint64_t io_orig_size;
/* Stuff for the vdev stack */
vdev_t *io_vd;
void *io_vsd;
const zio_vsd_ops_t *io_vsd_ops;
metaslab_class_t *io_metaslab_class; /* dva throttle class */
uint64_t io_offset;
hrtime_t io_timestamp; /* submitted at */
hrtime_t io_queued_timestamp;
hrtime_t io_target_timestamp;
hrtime_t io_delta; /* vdev queue service delta */
hrtime_t io_delay; /* Device access time (disk or */
/* file). */
avl_node_t io_queue_node;
avl_node_t io_offset_node;
avl_node_t io_alloc_node;
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
enum zio_flag io_flags;
enum zio_stage io_stage;
enum zio_stage io_pipeline;
enum zio_flag io_orig_flags;
enum zio_stage io_orig_stage;
enum zio_stage io_orig_pipeline;
enum zio_stage io_pipeline_trace;
int io_error;
int io_child_error[ZIO_CHILD_TYPES];
uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES];
uint64_t io_child_count;
uint64_t io_phys_children;
uint64_t io_parent_count;
uint64_t *io_stall;
zio_t *io_gang_leader;
zio_gang_node_t *io_gang_tree;
void *io_executor;
void *io_waiter;
void *io_bio;
kmutex_t io_lock;
kcondvar_t io_cv;
int io_allocator;
/* FMA state */
zio_cksum_report_t *io_cksum_report;
uint64_t io_ena;
/* Taskq dispatching state */
taskq_ent_t io_tqent;
};
enum blk_verify_flag {
BLK_VERIFY_ONLY,
BLK_VERIFY_LOG,
BLK_VERIFY_HALT
};
extern int zio_bookmark_compare(const void *, const void *);
extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_root(spa_t *spa,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
struct abd *data, uint64_t lsize, zio_done_func_t *done, void *priv,
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb);
extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *priv, zio_priority_t priority, enum zio_flag flags,
const zbookmark_phys_t *zb);
extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, zio_done_func_t *done, void *priv,
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb);
extern void zio_write_override(zio_t *zio, blkptr_t *bp, int copies,
boolean_t nopwrite);
extern void zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp);
extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *priv, zio_priority_t priority,
enum zio_flag flags, enum trim_flag trim_flags);
extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
enum zio_flag flags, boolean_t labels);
extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
enum zio_flag flags, boolean_t labels);
extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, enum zio_flag flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
blkptr_t *new_bp, uint64_t size, boolean_t *slog);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
extern int zio_wait(zio_t *zio);
extern void zio_nowait(zio_t *zio);
extern void zio_execute(void *zio);
extern void zio_interrupt(void *zio);
extern void zio_delay_init(zio_t *zio);
extern void zio_delay_interrupt(zio_t *zio);
extern void zio_deadman(zio_t *zio, char *tag);
extern zio_t *zio_walk_parents(zio_t *cio, zio_link_t **);
extern zio_t *zio_walk_children(zio_t *pio, zio_link_t **);
extern zio_t *zio_unique_parent(zio_t *cio);
extern void zio_add_child(zio_t *pio, zio_t *cio);
extern void *zio_buf_alloc(size_t size);
extern void zio_buf_free(void *buf, size_t size);
extern void *zio_data_buf_alloc(size_t size);
extern void zio_data_buf_free(void *buf, size_t size);
extern void zio_push_transform(zio_t *zio, struct abd *abd, uint64_t size,
uint64_t bufsize, zio_transform_func_t *transform);
extern void zio_pop_transforms(zio_t *zio);
extern void zio_resubmit_stage_async(void *);
extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
uint64_t offset, struct abd *data, uint64_t size, int type,
zio_priority_t priority, enum zio_flag flags,
zio_done_func_t *done, void *priv);
extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset,
struct abd *data, uint64_t size, zio_type_t type, zio_priority_t priority,
enum zio_flag flags, zio_done_func_t *done, void *priv);
extern void zio_vdev_io_bypass(zio_t *zio);
extern void zio_vdev_io_reissue(zio_t *zio);
extern void zio_vdev_io_redone(zio_t *zio);
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
extern void zio_checksum_verified(zio_t *zio);
extern int zio_worst_error(int e1, int e2);
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,
enum zio_checksum parent);
extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
enum zio_checksum child, enum zio_checksum parent);
extern enum zio_compress zio_compress_select(spa_t *spa,
enum zio_compress child, enum zio_compress parent);
extern uint8_t zio_complevel_select(spa_t *spa, enum zio_compress compress,
uint8_t child, uint8_t parent);
extern void zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t);
extern int zio_resume(spa_t *spa);
extern void zio_resume_wait(spa_t *spa);
extern boolean_t zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
boolean_t config_held, enum blk_verify_flag blk_verify);
/*
* Initial setup and teardown.
*/
extern void zio_init(void);
extern void zio_fini(void);
/*
* Fault injection
*/
struct zinject_record;
extern uint32_t zio_injection_enabled;
extern int zio_inject_fault(char *name, int flags, int *id,
struct zinject_record *record);
extern int zio_inject_list_next(int *id, char *name, size_t buflen,
struct zinject_record *record);
extern int zio_clear_fault(int id);
extern void zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type);
extern int zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
uint64_t type, int error);
extern int zio_handle_fault_injection(zio_t *zio, int error);
extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
extern int zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1,
int err2);
extern int zio_handle_label_injection(zio_t *zio, int error);
extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
/*
* Checksum ereport functions
*/
extern int zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, struct zio_bad_cksum *info);
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
/* If we have the good data in hand, this function can be used */
extern int zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, const abd_t *good_data, const abd_t *bad_data,
struct zio_bad_cksum *info);
void zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr);
+extern void zfs_ereport_snapshot_post(const char *subclass, spa_t *spa,
+ const char *name);
/* Called from spa_sync(), but primarily an injection handler */
extern void spa_handle_ignored_writes(spa_t *spa);
/* zbookmark_phys functions */
boolean_t zbookmark_subtree_completed(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
int zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2,
uint8_t ibs2, const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2);
#ifdef __cplusplus
}
#endif
#endif /* _ZIO_H */
diff --git a/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c b/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c
index 56df3e66643c..97092bdc0fdf 100644
--- a/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c
+++ b/sys/contrib/openzfs/lib/libshare/os/freebsd/nfs.c
@@ -1,459 +1,465 @@
/*
* Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Copyright (c) 2020 by Delphix. All rights reserved.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/vfs.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libintl.h>
#include "libzfs_impl.h"
#include "libshare_impl.h"
#include "nfs.h"
#define _PATH_MOUNTDPID "/var/run/mountd.pid"
#define FILE_HEADER "# !!! DO NOT EDIT THIS FILE MANUALLY !!!\n\n"
#define OPTSSIZE 1024
#define MAXLINESIZE (PATH_MAX + OPTSSIZE)
#define ZFS_EXPORTS_FILE "/etc/zfs/exports"
#define ZFS_EXPORTS_LOCK ZFS_EXPORTS_FILE".lock"
static sa_fstype_t *nfs_fstype;
static int nfs_lock_fd = -1;
/*
* The nfs_exports_[lock|unlock] functions are used to guard against concurrent
* updates to the exports file. Each protocol is responsible for
* providing the necessary locking to ensure consistency.
*/
static int
nfs_exports_lock(void)
{
int err;
nfs_lock_fd = open(ZFS_EXPORTS_LOCK,
O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (nfs_lock_fd == -1) {
err = errno;
fprintf(stderr, "failed to lock %s: %s\n",
ZFS_EXPORTS_LOCK, strerror(err));
return (err);
}
if (flock(nfs_lock_fd, LOCK_EX) != 0) {
err = errno;
fprintf(stderr, "failed to lock %s: %s\n",
ZFS_EXPORTS_LOCK, strerror(err));
(void) close(nfs_lock_fd);
return (err);
}
return (0);
}
static void
nfs_exports_unlock(void)
{
verify(nfs_lock_fd > 0);
if (flock(nfs_lock_fd, LOCK_UN) != 0) {
fprintf(stderr, "failed to unlock %s: %s\n",
ZFS_EXPORTS_LOCK, strerror(errno));
}
close(nfs_lock_fd);
nfs_lock_fd = -1;
}
/*
* Read one line from a file. Skip comments, empty lines, and any line whose
* mountpoint matches the 'skip' argument.
*
* NOTE: This function returns a static buffer and thus is not thread-safe.
*/
static char *
zgetline(FILE *fd, const char *skip)
{
static char line[MAXLINESIZE];
size_t len, skiplen = 0;
char *s, last;
if (skip != NULL)
skiplen = strlen(skip);
for (;;) {
s = fgets(line, sizeof (line), fd);
if (s == NULL)
return (NULL);
/* Skip empty lines and comments. */
if (line[0] == '\n' || line[0] == '#')
continue;
len = strlen(line);
if (line[len - 1] == '\n')
line[len - 1] = '\0';
last = line[skiplen];
/* Skip the given mountpoint. */
if (skip != NULL && strncmp(skip, line, skiplen) == 0 &&
(last == '\t' || last == ' ' || last == '\0')) {
continue;
}
break;
}
return (line);
}
/*
* This function translates options to a format acceptable by exports(5), e.g.
*
* -ro -network=192.168.0.0 -mask=255.255.255.0 -maproot=0 \
* zfs.freebsd.org 69.147.83.54
*
* Accepted input formats:
*
* ro,network=192.168.0.0,mask=255.255.255.0,maproot=0,zfs.freebsd.org
* ro network=192.168.0.0 mask=255.255.255.0 maproot=0 zfs.freebsd.org
* -ro,-network=192.168.0.0,-mask=255.255.255.0,-maproot=0,zfs.freebsd.org
* -ro -network=192.168.0.0 -mask=255.255.255.0 -maproot=0 \
* zfs.freebsd.org
*
* Recognized keywords:
*
* ro, maproot, mapall, mask, network, sec, alldirs, public, webnfs,
* index, quiet
*
* NOTE: This function returns a static buffer and thus is not thread-safe.
*/
static char *
translate_opts(const char *shareopts)
{
static const char *known_opts[] = { "ro", "maproot", "mapall", "mask",
"network", "sec", "alldirs", "public", "webnfs", "index", "quiet",
NULL };
static char newopts[OPTSSIZE];
char oldopts[OPTSSIZE];
char *o, *s = NULL;
unsigned int i;
size_t len;
strlcpy(oldopts, shareopts, sizeof (oldopts));
newopts[0] = '\0';
s = oldopts;
while ((o = strsep(&s, "-, ")) != NULL) {
if (o[0] == '\0')
continue;
for (i = 0; known_opts[i] != NULL; i++) {
len = strlen(known_opts[i]);
if (strncmp(known_opts[i], o, len) == 0 &&
(o[len] == '\0' || o[len] == '=')) {
strlcat(newopts, "-", sizeof (newopts));
break;
}
}
strlcat(newopts, o, sizeof (newopts));
strlcat(newopts, " ", sizeof (newopts));
}
return (newopts);
}
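/*
 * Illustrative only (not part of this file): exercising translate_opts()
 * with the comma-separated example from the comment above.  The result is
 * the exports(5) form, with a trailing space left by the loop; the function
 * name is hypothetical.
 */
static void
translate_opts_example(void)
{
	const char *in = "ro,network=192.168.0.0,mask=255.255.255.0,"
	    "maproot=0,zfs.freebsd.org";
	/* -ro -network=192.168.0.0 -mask=255.255.255.0 -maproot=0 zfs.freebsd.org */
	printf("%s\n", translate_opts(in));
}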
static char *
nfs_init_tmpfile(void)
{
char *tmpfile = NULL;
if (asprintf(&tmpfile, "%s%s", ZFS_EXPORTS_FILE, ".XXXXXXXX") == -1) {
fprintf(stderr, "Unable to allocate buffer for temporary "
"file name\n");
return (NULL);
}
int fd = mkstemp(tmpfile);
if (fd == -1) {
fprintf(stderr, "Unable to create temporary file: %s",
strerror(errno));
free(tmpfile);
return (NULL);
}
close(fd);
return (tmpfile);
}
static int
nfs_fini_tmpfile(char *tmpfile)
{
if (rename(tmpfile, ZFS_EXPORTS_FILE) == -1) {
fprintf(stderr, "Unable to rename %s: %s\n", tmpfile,
strerror(errno));
unlink(tmpfile);
free(tmpfile);
return (SA_SYSTEM_ERR);
}
free(tmpfile);
return (SA_OK);
}
/*
* This function copies all entries from the exports file to "filename",
* omitting any entries for the specified mountpoint.
*/
static int
nfs_copy_entries(char *filename, const char *mountpoint)
{
int error = SA_OK;
char *line;
FILE *oldfp = fopen(ZFS_EXPORTS_FILE, "re");
FILE *newfp = fopen(filename, "w+e");
if (newfp == NULL) {
fprintf(stderr, "failed to open %s file: %s", filename,
strerror(errno));
fclose(oldfp);
return (SA_SYSTEM_ERR);
}
fputs(FILE_HEADER, newfp);
/*
* The ZFS_EXPORTS_FILE may not exist yet. If that's the
* case then just write out the new file.
*/
if (oldfp != NULL) {
while ((line = zgetline(oldfp, mountpoint)) != NULL)
fprintf(newfp, "%s\n", line);
if (ferror(oldfp) != 0) {
error = ferror(oldfp);
}
if (fclose(oldfp) != 0) {
fprintf(stderr, "Unable to close file %s: %s\n",
filename, strerror(errno));
error = error != 0 ? error : SA_SYSTEM_ERR;
}
}
if (error == 0 && ferror(newfp) != 0) {
error = ferror(newfp);
}
if (fclose(newfp) != 0) {
fprintf(stderr, "Unable to close file %s: %s\n",
filename, strerror(errno));
error = error != 0 ? error : SA_SYSTEM_ERR;
}
return (error);
}
static int
nfs_enable_share(sa_share_impl_t impl_share)
{
char *filename = NULL;
int error;
if ((filename = nfs_init_tmpfile()) == NULL)
return (SA_SYSTEM_ERR);
error = nfs_exports_lock();
if (error != 0) {
unlink(filename);
free(filename);
return (error);
}
error = nfs_copy_entries(filename, impl_share->sa_mountpoint);
if (error != SA_OK) {
unlink(filename);
free(filename);
nfs_exports_unlock();
return (error);
}
FILE *fp = fopen(filename, "a+e");
if (fp == NULL) {
fprintf(stderr, "failed to open %s file: %s", filename,
strerror(errno));
unlink(filename);
free(filename);
nfs_exports_unlock();
return (SA_SYSTEM_ERR);
}
char *shareopts = FSINFO(impl_share, nfs_fstype)->shareopts;
if (strcmp(shareopts, "on") == 0)
shareopts = "";
if (fprintf(fp, "%s\t%s\n", impl_share->sa_mountpoint,
translate_opts(shareopts)) < 0) {
fprintf(stderr, "failed to write to %s\n", filename);
fclose(fp);
unlink(filename);
free(filename);
nfs_exports_unlock();
return (SA_SYSTEM_ERR);
}
if (fclose(fp) != 0) {
fprintf(stderr, "Unable to close file %s: %s\n",
filename, strerror(errno));
unlink(filename);
free(filename);
nfs_exports_unlock();
return (SA_SYSTEM_ERR);
}
error = nfs_fini_tmpfile(filename);
nfs_exports_unlock();
return (error);
}
static int
nfs_disable_share(sa_share_impl_t impl_share)
{
int error;
char *filename = NULL;
if ((filename = nfs_init_tmpfile()) == NULL)
return (SA_SYSTEM_ERR);
error = nfs_exports_lock();
if (error != 0) {
unlink(filename);
free(filename);
return (error);
}
error = nfs_copy_entries(filename, impl_share->sa_mountpoint);
if (error != SA_OK) {
unlink(filename);
free(filename);
nfs_exports_unlock();
return (error);
}
error = nfs_fini_tmpfile(filename);
nfs_exports_unlock();
return (error);
}
static boolean_t
nfs_is_shared(sa_share_impl_t impl_share)
{
char *s, last, line[MAXLINESIZE];
size_t len;
char *mntpoint = impl_share->sa_mountpoint;
size_t mntlen = strlen(mntpoint);
FILE *fp = fopen(ZFS_EXPORTS_FILE, "re");
if (fp == NULL)
return (B_FALSE);
for (;;) {
s = fgets(line, sizeof (line), fp);
if (s == NULL)
return (B_FALSE);
/* Skip empty lines and comments. */
if (line[0] == '\n' || line[0] == '#')
continue;
len = strlen(line);
if (line[len - 1] == '\n')
line[len - 1] = '\0';
last = line[mntlen];
/* Skip the given mountpoint. */
if (strncmp(mntpoint, line, mntlen) == 0 &&
(last == '\t' || last == ' ' || last == '\0')) {
fclose(fp);
return (B_TRUE);
}
}
fclose(fp);
return (B_FALSE);
}
static int
nfs_validate_shareopts(const char *shareopts)
{
return (SA_OK);
}
static int
nfs_update_shareopts(sa_share_impl_t impl_share, const char *shareopts)
{
FSINFO(impl_share, nfs_fstype)->shareopts = (char *)shareopts;
return (SA_OK);
}
static void
nfs_clear_shareopts(sa_share_impl_t impl_share)
{
FSINFO(impl_share, nfs_fstype)->shareopts = NULL;
}
/*
* Commit the shares by restarting mountd.
*/
static int
nfs_commit_shares(void)
{
struct pidfh *pfh;
pid_t mountdpid;
+start:
pfh = pidfile_open(_PATH_MOUNTDPID, 0600, &mountdpid);
if (pfh != NULL) {
- /* Mountd is not running. */
+ /* mountd(8) is not running. */
pidfile_remove(pfh);
return (SA_OK);
}
if (errno != EEXIST) {
/* Cannot open pidfile for some reason. */
return (SA_SYSTEM_ERR);
}
+ if (mountdpid == -1) {
+ /* mountd(8) is running, but has not written its PID yet. */
+ usleep(500);
+ goto start;
+ }
/* We have mountd(8) PID in mountdpid variable. */
kill(mountdpid, SIGHUP);
return (SA_OK);
}
static const sa_share_ops_t nfs_shareops = {
.enable_share = nfs_enable_share,
.disable_share = nfs_disable_share,
.is_shared = nfs_is_shared,
.validate_shareopts = nfs_validate_shareopts,
.update_shareopts = nfs_update_shareopts,
.clear_shareopts = nfs_clear_shareopts,
.commit_shares = nfs_commit_shares,
};
/*
* Initializes the NFS functionality of libshare.
*/
void
libshare_nfs_init(void)
{
nfs_fstype = register_fstype("nfs", &nfs_shareops);
}
diff --git a/sys/contrib/openzfs/lib/libspl/os/freebsd/mnttab.c b/sys/contrib/openzfs/lib/libspl/os/freebsd/mnttab.c
index bd3e3e4e3eef..d830257fbd16 100644
--- a/sys/contrib/openzfs/lib/libspl/os/freebsd/mnttab.c
+++ b/sys/contrib/openzfs/lib/libspl/os/freebsd/mnttab.c
@@ -1,216 +1,223 @@
/*
* Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This file implements Solaris compatible getmntany() and hasmntopt()
* functions.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static char *
mntopt(char **p)
{
char *cp = *p;
char *retstr;
while (*cp && isspace(*cp))
cp++;
retstr = cp;
while (*cp && *cp != ',')
cp++;
if (*cp) {
*cp = '\0';
cp++;
}
*p = cp;
return (retstr);
}
char *
hasmntopt(struct mnttab *mnt, char *opt)
{
char tmpopts[MNT_LINE_MAX];
char *f, *opts = tmpopts;
if (mnt->mnt_mntopts == NULL)
return (NULL);
(void) strcpy(opts, mnt->mnt_mntopts);
f = mntopt(&opts);
for (; *f; f = mntopt(&opts)) {
if (strncmp(opt, f, strlen(opt)) == 0)
return (f - tmpopts + mnt->mnt_mntopts);
}
return (NULL);
}
static void
optadd(char *mntopts, size_t size, const char *opt)
{
if (mntopts[0] != '\0')
strlcat(mntopts, ",", size);
strlcat(mntopts, opt, size);
}
+static __thread char gfstypename[MFSNAMELEN];
+static __thread char gmntfromname[MNAMELEN];
+static __thread char gmntonname[MNAMELEN];
+static __thread char gmntopts[MNTMAXSTR];
+
void
statfs2mnttab(struct statfs *sfs, struct mnttab *mp)
{
- static char mntopts[MNTMAXSTR];
long flags;
- mntopts[0] = '\0';
+ strlcpy(gfstypename, sfs->f_fstypename, sizeof (gfstypename));
+ mp->mnt_fstype = gfstypename;
+
+ strlcpy(gmntfromname, sfs->f_mntfromname, sizeof (gmntfromname));
+ mp->mnt_special = gmntfromname;
+
+ strlcpy(gmntonname, sfs->f_mntonname, sizeof (gmntonname));
+ mp->mnt_mountp = gmntonname;
flags = sfs->f_flags;
-#define OPTADD(opt) optadd(mntopts, sizeof (mntopts), (opt))
+ gmntopts[0] = '\0';
+#define OPTADD(opt) optadd(gmntopts, sizeof (gmntopts), (opt))
if (flags & MNT_RDONLY)
OPTADD(MNTOPT_RO);
else
OPTADD(MNTOPT_RW);
if (flags & MNT_NOSUID)
OPTADD(MNTOPT_NOSETUID);
else
OPTADD(MNTOPT_SETUID);
if (flags & MNT_UPDATE)
OPTADD(MNTOPT_REMOUNT);
if (flags & MNT_NOATIME)
OPTADD(MNTOPT_NOATIME);
else
OPTADD(MNTOPT_ATIME);
OPTADD(MNTOPT_NOXATTR);
if (flags & MNT_NOEXEC)
OPTADD(MNTOPT_NOEXEC);
else
OPTADD(MNTOPT_EXEC);
#undef OPTADD
- mp->mnt_special = strdup(sfs->f_mntfromname);
- mp->mnt_mountp = strdup(sfs->f_mntonname);
- mp->mnt_fstype = strdup(sfs->f_fstypename);
- mp->mnt_mntopts = strdup(mntopts);
+ mp->mnt_mntopts = gmntopts;
}
static struct statfs *gsfs = NULL;
static int allfs = 0;
static int
statfs_init(void)
{
struct statfs *sfs;
int error;
if (gsfs != NULL) {
free(gsfs);
gsfs = NULL;
}
allfs = getfsstat(NULL, 0, MNT_NOWAIT);
if (allfs == -1)
goto fail;
gsfs = malloc(sizeof (gsfs[0]) * allfs * 2);
if (gsfs == NULL)
goto fail;
allfs = getfsstat(gsfs, (long)(sizeof (gsfs[0]) * allfs * 2),
MNT_NOWAIT);
if (allfs == -1)
goto fail;
sfs = realloc(gsfs, allfs * sizeof (gsfs[0]));
if (sfs != NULL)
gsfs = sfs;
return (0);
fail:
error = errno;
if (gsfs != NULL)
free(gsfs);
gsfs = NULL;
allfs = 0;
return (error);
}
int
getmntany(FILE *fd __unused, struct mnttab *mgetp, struct mnttab *mrefp)
{
- // struct statfs *sfs;
int i, error;
error = statfs_init();
if (error != 0)
return (error);
for (i = 0; i < allfs; i++) {
if (mrefp->mnt_special != NULL &&
strcmp(mrefp->mnt_special, gsfs[i].f_mntfromname) != 0) {
continue;
}
if (mrefp->mnt_mountp != NULL &&
strcmp(mrefp->mnt_mountp, gsfs[i].f_mntonname) != 0) {
continue;
}
if (mrefp->mnt_fstype != NULL &&
strcmp(mrefp->mnt_fstype, gsfs[i].f_fstypename) != 0) {
continue;
}
statfs2mnttab(&gsfs[i], mgetp);
return (0);
}
return (-1);
}
int
getmntent(FILE *fp, struct mnttab *mp)
{
- // struct statfs *sfs;
int error, nfs;
nfs = (int)lseek(fileno(fp), 0, SEEK_CUR);
if (nfs == -1)
return (errno);
/* If nfs is 0, we want to refresh our cache. */
if (nfs == 0 || gsfs == NULL) {
error = statfs_init();
if (error != 0)
return (error);
}
if (nfs >= allfs)
return (-1);
statfs2mnttab(&gsfs[nfs], mp);
if (lseek(fileno(fp), 1, SEEK_CUR) == -1)
return (errno);
return (0);
}
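/*
 * Illustrative only (not part of this file): a minimal consumer of the
 * Solaris-compatible interfaces above, checking whether a hypothetical
 * dataset mounted at /tank is read-only; the function name is hypothetical.
 */
static int
example_is_tank_readonly(void)
{
	struct mnttab ref = { 0 }, entry;
	ref.mnt_mountp = "/tank";
	ref.mnt_fstype = MNTTYPE_ZFS;
	if (getmntany(NULL, &entry, &ref) != 0)
		return (0);
	return (hasmntopt(&entry, MNTOPT_RO) != NULL);
}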
diff --git a/sys/contrib/openzfs/lib/libzfs/Makefile.am b/sys/contrib/openzfs/lib/libzfs/Makefile.am
index 1a7698b4760e..31267fd9a5e9 100644
--- a/sys/contrib/openzfs/lib/libzfs/Makefile.am
+++ b/sys/contrib/openzfs/lib/libzfs/Makefile.am
@@ -1,98 +1,98 @@
include $(top_srcdir)/config/Rules.am
VPATH = \
$(top_srcdir)/module/icp \
$(top_srcdir)/module/zcommon \
$(top_srcdir)/lib/libzfs
# Suppress unused but set variable warnings often due to ASSERTs
AM_CFLAGS += $(NO_UNUSED_BUT_SET_VARIABLE)
AM_CFLAGS += $(LIBCRYPTO_CFLAGS) $(ZLIB_CFLAGS)
pkgconfig_DATA = libzfs.pc
lib_LTLIBRARIES = libzfs.la
include $(top_srcdir)/config/Abigail.am
USER_C = \
libzfs_changelist.c \
libzfs_config.c \
libzfs_crypto.c \
libzfs_dataset.c \
libzfs_diff.c \
libzfs_import.c \
libzfs_iter.c \
libzfs_mount.c \
libzfs_pool.c \
libzfs_sendrecv.c \
libzfs_status.c \
libzfs_util.c
if BUILD_FREEBSD
USER_C += \
os/freebsd/libzfs_compat.c \
os/freebsd/libzfs_ioctl_compat.c \
os/freebsd/libzfs_zmount.c
endif
if BUILD_LINUX
USER_C += \
os/linux/libzfs_mount_os.c \
os/linux/libzfs_pool_os.c \
os/linux/libzfs_sendrecv_os.c \
os/linux/libzfs_util_os.c
endif
KERNEL_C = \
algs/sha2/sha2.c \
cityhash.c \
zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_aarch64_neon.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
dist_libzfs_la_SOURCES = \
$(USER_C)
nodist_libzfs_la_SOURCES = \
$(KERNEL_C)
libzfs_la_LIBADD = \
$(abs_top_builddir)/lib/libshare/libshare.la \
$(abs_top_builddir)/lib/libzfs_core/libzfs_core.la \
$(abs_top_builddir)/lib/libnvpair/libnvpair.la \
$(abs_top_builddir)/lib/libuutil/libuutil.la
-libzfs_la_LIBADD += -lm $(LIBCRYPTO_LIBS) $(ZLIB_LIBS) $(LTLIBINTL)
+libzfs_la_LIBADD += -lm $(LIBCRYPTO_LIBS) $(ZLIB_LIBS) $(LIBFETCH_LIBS) $(LTLIBINTL)
libzfs_la_LDFLAGS = -pthread
if !ASAN_ENABLED
libzfs_la_LDFLAGS += -Wl,-z,defs
endif
if BUILD_FREEBSD
libzfs_la_LIBADD += -lutil -lgeom
endif
libzfs_la_LDFLAGS += -version-info 5:0:1
include $(top_srcdir)/config/CppCheck.am
# Library ABI
EXTRA_DIST = libzfs.abi libzfs.suppr
# Licensing data
EXTRA_DIST += THIRDPARTYLICENSE.openssl THIRDPARTYLICENSE.openssl.descrip
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index 82f8b7dc87f8..14e03ee28ffe 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -1,7554 +1,7587 @@
<abi-corpus version='2.0' architecture='elf-amd-x86_64' soname='libzfs.so.4'>
<elf-needed>
<dependency name='libzfs_core.so.3'/>
<dependency name='libuuid.so.1'/>
<dependency name='libblkid.so.1'/>
<dependency name='libudev.so.1'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libtirpc.so.3'/>
<dependency name='libuutil.so.3'/>
<dependency name='libm.so.6'/>
<dependency name='libcrypto.so.1.1'/>
<dependency name='libz.so.1'/>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='SHA256Init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='SHA2Final' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='SHA2Init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='SHA2Update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='SHA384Init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='SHA512Init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='bookmark_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_gather' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_haszonedchild' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_postfix' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_prefix' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='changelist_unshare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='cityhash4' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='create_parents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_nestcheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='do_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='do_unmount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='entity_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='find_shares_object' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_impl_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native_varsize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_dataset_depth' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getprop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='isa_child_of' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libshare_nfs_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libshare_smb_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_add_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_envvar_is_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_errno' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_action' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_description' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_free_str_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_load_module' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_cache' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_print_on_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout_nopath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_set_pipe_max' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='make_bookmark_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='make_dataset_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='make_dataset_handle_zc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='make_dataset_simple_handle_zc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mountpoint_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='namespace_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='no_memory' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='permset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='pool_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='printf_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='register_fstype' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='remove_mountpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_disable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_enable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_errorstr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_validate_shareopts' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='snapshot_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='unshare_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_alloc_dst_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_expand_dst_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_free_nvlists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_read_dst_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_write_conf_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zcmd_write_src_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_depends_on' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_valid_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_adjust_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_allocatable_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_asprintf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_bookmark_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_all_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_nfs_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_proto' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_smb_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_component_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create_ancestors' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_attempt_load_keys' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_clone_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_get_encryption_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_rewrap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_name_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_canonicalize_perm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_verify_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_whokey' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_error_aux' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_error_fmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_foreach_mountpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_all_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_clones_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_recvd_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_user_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_handle_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_mountable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_proto' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_delegation_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_valid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicestrtonum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parent_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parse_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parse_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_path_to_zhandle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_delegatable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_encryption_key_param' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_recvd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inherit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inheritable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_is_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_keylocation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_visible' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prune_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_realloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_refresh_properties' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_save_arguments' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_progress' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume_token_to_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_saved' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_set_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_setprop_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_proto' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_shareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_show_diffs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_purge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_special_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_standard_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_standard_error_fmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_type_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmountall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_proto' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_bypath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_bytype' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userspace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_valid_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_validate_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_kernel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_userland' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_zpl_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' alias='zpool_unmount_datasets' is-defined='yes'/>
<elf-symbol name='zpool_discard_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_enable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' alias='zpool_mount_datasets' is-defined='yes'/>
<elf-symbol name='zpool_events_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_seek' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_explain_recover' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export_force' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_feature_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev_by_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_free_handles' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_errlog' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_features' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_load_policy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state_str' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_in_use' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_is_draid_spare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_load_compat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_log_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_mount_datasets' type='func-type' binding='weak-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_name_valid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path_ds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_canfail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_silent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_pool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_print_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_unsupported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_relabel_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_skip_pool' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_standard_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_standard_error_fmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_sync_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_unmount_datasets' type='func-type' binding='weak-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_upgrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_attach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_degrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_detach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_fault' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_indirect_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_offline' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_online' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_path_to_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove_cancel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_split' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_expand_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_free_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_get_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter_common' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_parse_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_print_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_impl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_number' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_width' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zvol_volsize_to_reservation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='fletcher_4_abd_ops' size='24' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512bw_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512f_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_sse2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_ssse3_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar4_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nfs_only' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='proto_table' size='48' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='share_all_proto' size='12' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='smb_only' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='smb_shares' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='1904' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
<abi-instr address-size='64' path='libshare.c' language='LANG_C99'>
<class-decl name='sa_share_fsinfo' size-in-bits='64' is-struct='yes' visibility='default' id='412a8a55'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='shareopts' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_share_fsinfo_t' type-id='412a8a55' id='24463d51'/>
<class-decl name='sa_share_impl' size-in-bits='192' is-struct='yes' visibility='default' id='72b09bf8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='sa_mountpoint' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='sa_zfsname' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='sa_fsinfo' type-id='17934354' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_share_impl_t' type-id='2722c1de' id='a48b47d0'/>
<class-decl name='sa_share_ops' size-in-bits='448' is-struct='yes' visibility='default' id='9990a42a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='enable_share' type-id='fa1f29ce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='disable_share' type-id='fa1f29ce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='is_shared' type-id='f337456d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='validate_shareopts' type-id='70487b28' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='update_shareopts' type-id='8c9ca98d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='clear_shareopts' type-id='20e6b301' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='commit_shares' type-id='1db260e5' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_share_ops_t' type-id='9990a42a' id='cfdd2674'/>
<class-decl name='sa_fstype' size-in-bits='256' is-struct='yes' visibility='default' id='b329094d'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='next' type-id='3a81ee0d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='ops' type-id='4f0de78a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='fsinfo_index' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sa_fstype_t' type-id='b329094d' id='639af739'/>
<qualified-type-def type-id='cfdd2674' const='yes' id='3903d8a4'/>
<pointer-type-def type-id='3903d8a4' size-in-bits='64' id='4f0de78a'/>
<pointer-type-def type-id='276427e1' size-in-bits='64' id='1db260e5'/>
<pointer-type-def type-id='5113b296' size-in-bits='64' id='70487b28'/>
<pointer-type-def type-id='c13578bc' size-in-bits='64' id='fa1f29ce'/>
<pointer-type-def type-id='4d896449' size-in-bits='64' id='8c9ca98d'/>
<pointer-type-def type-id='b329094d' size-in-bits='64' id='3a81ee0d'/>
<pointer-type-def type-id='639af739' size-in-bits='64' id='0dd0309c'/>
<pointer-type-def type-id='24463d51' size-in-bits='64' id='17934354'/>
<pointer-type-def type-id='72b09bf8' size-in-bits='64' id='2722c1de'/>
<pointer-type-def type-id='86373eb1' size-in-bits='64' id='f337456d'/>
<pointer-type-def type-id='6b19040d' size-in-bits='64' id='20e6b301'/>
<function-decl name='register_fstype' mangled-name='register_fstype' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='register_fstype'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='4f0de78a' name='ops'/>
<return type-id='0dd0309c'/>
</function-decl>
<function-decl name='libshare_nfs_init' mangled-name='libshare_nfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libshare_nfs_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libshare_smb_init' mangled-name='libshare_smb_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libshare_smb_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='276427e1'>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='5113b296'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='c13578bc'>
<parameter type-id='a48b47d0'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='4d896449'>
<parameter type-id='a48b47d0'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='86373eb1'>
<parameter type-id='a48b47d0'/>
<return type-id='c19b74c3'/>
</function-type>
<function-type size-in-bits='64' id='6b19040d'>
<parameter type-id='a48b47d0'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='os/linux/nfs.c' language='LANG_C99'>
<function-decl name='rename' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fputs' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='e75a27e9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='flock' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='mkdir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='e1c52942'/>
<return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='unlink' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='80f4b756'/>
- <return type-id='95e97e5e'/>
- </function-decl>
</abi-instr>
<abi-instr address-size='64' path='os/linux/smb.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2040' id='11641789'>
<subrange length='255' type-id='7359adad' id='36e7f891'/>
</array-type-def>
<class-decl name='smb_share_s' size-in-bits='36992' is-struct='yes' visibility='default' id='a75bc907'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='name' type-id='11641789' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2040'>
<var-decl name='path' type-id='d16c6df4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='34808'>
<var-decl name='comment' type-id='11641789' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='36864'>
<var-decl name='guest_ok' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='36928'>
<var-decl name='next' type-id='05ed1c5f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='smb_share_t' type-id='a75bc907' id='2d05afd9'/>
<class-decl name='dirent' size-in-bits='2240' is-struct='yes' visibility='default' id='611586a1'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='d_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='d_off' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='d_reclen' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='144'>
<var-decl name='d_type' type-id='002ac4a6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='152'>
<var-decl name='d_name' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='611586a1' size-in-bits='64' id='2e243169'/>
<pointer-type-def type-id='a75bc907' size-in-bits='64' id='05ed1c5f'/>
<pointer-type-def type-id='2d05afd9' size-in-bits='64' id='a3e5c654'/>
<var-decl name='smb_shares' type-id='a3e5c654' mangled-name='smb_shares' visibility='default' elf-symbol-id='smb_shares'/>
<function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='f09217ba'/>
</function-decl>
<function-decl name='fgets' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='e75a27e9'/>
<return type-id='26a90f95'/>
</function-decl>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
<type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
</abi-instr>
<abi-instr address-size='64' path='../../module/icp/algs/sha2/sha2.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='8f92235e' size-in-bits='64' id='337c1cdd'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='8f92235e' size-in-bits='1024' id='388e96b8'>
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
</array-type-def>
<array-type-def dimensions='1' type-id='8f92235e' size-in-bits='256' id='2f8b211b'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='1024' id='b316cf0d'>
<subrange length='16' type-id='7359adad' id='848d0938'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='512' id='c5d13f42'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='1024' id='c768f32d'>
<subrange length='128' type-id='7359adad' id='1eb1687a'/>
</array-type-def>
<class-decl name='SHA2_CTX' size-in-bits='1728' is-struct='yes' naming-typedef-id='2aec903e' visibility='default' id='51cc0913'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='algotype' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='state' type-id='ac5ab595' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='count' type-id='ac5ab596' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='buf_un' type-id='ac5ab597' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__' size-in-bits='512' is-anonymous='yes' visibility='default' id='ac5ab595'>
<data-member access='public'>
<var-decl name='s32' type-id='2f8b211b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='s64' type-id='c5d13f42' visibility='default'/>
</data-member>
</union-decl>
<union-decl name='__anonymous_union__1' size-in-bits='128' is-anonymous='yes' visibility='default' id='ac5ab596'>
<data-member access='public'>
<var-decl name='c32' type-id='337c1cdd' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='c64' type-id='c1c22e6c' visibility='default'/>
</data-member>
</union-decl>
<union-decl name='__anonymous_union__2' size-in-bits='1024' is-anonymous='yes' visibility='default' id='ac5ab597'>
<data-member access='public'>
<var-decl name='buf8' type-id='c768f32d' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='buf32' type-id='388e96b8' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='buf64' type-id='b316cf0d' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='SHA2_CTX' type-id='51cc0913' id='2aec903e'/>
<typedef-decl name='SHA256_CTX' type-id='2aec903e' id='1ef7fe01'/>
<typedef-decl name='SHA384_CTX' type-id='2aec903e' id='139bfea5'/>
<typedef-decl name='SHA512_CTX' type-id='2aec903e' id='33c643d0'/>
<pointer-type-def type-id='1ef7fe01' size-in-bits='64' id='aacf5386'/>
<pointer-type-def type-id='2aec903e' size-in-bits='64' id='5d626b03'/>
<pointer-type-def type-id='139bfea5' size-in-bits='64' id='074c43fa'/>
<pointer-type-def type-id='33c643d0' size-in-bits='64' id='ceba8189'/>
<function-decl name='htonl' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8f92235e'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='SHA2Init' mangled-name='SHA2Init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='SHA2Init'>
<parameter type-id='9c313c2d' name='mech'/>
<parameter type-id='5d626b03' name='ctx'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='SHA256Init' mangled-name='SHA256Init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='SHA256Init'>
<parameter type-id='aacf5386' name='ctx'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='SHA384Init' mangled-name='SHA384Init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='SHA384Init'>
<parameter type-id='074c43fa' name='ctx'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='SHA512Init' mangled-name='SHA512Init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='SHA512Init'>
<parameter type-id='ceba8189' name='ctx'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='SHA2Update' mangled-name='SHA2Update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='SHA2Update'>
<parameter type-id='5d626b03' name='ctx'/>
<parameter type-id='eaa32e2f' name='inptr'/>
<parameter type-id='b59d7dce' name='input_len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='SHA2Final' mangled-name='SHA2Final' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='SHA2Final'>
<parameter type-id='eaa32e2f' name='digest'/>
<parameter type-id='5d626b03' name='ctx'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/cityhash.c' language='LANG_C99'>
<function-decl name='cityhash4' mangled-name='cityhash4' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='cityhash4'>
<parameter type-id='9c313c2d' name='w1'/>
<parameter type-id='9c313c2d' name='w2'/>
<parameter type-id='9c313c2d' name='w3'/>
<parameter type-id='9c313c2d' name='w4'/>
<return type-id='9c313c2d'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfeature_common.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='15232' id='d96379d0'>
<subrange length='34' type-id='7359adad' id='6a6a7e00'/>
</array-type-def>
<enum-decl name='zfeature_flags' id='6db816a4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFEATURE_FLAG_READONLY_COMPAT' value='1'/>
<enumerator name='ZFEATURE_FLAG_MOS' value='2'/>
<enumerator name='ZFEATURE_FLAG_ACTIVATE_ON_ENABLE' value='4'/>
<enumerator name='ZFEATURE_FLAG_PER_DATASET' value='8'/>
</enum-decl>
<typedef-decl name='zfeature_flags_t' type-id='6db816a4' id='fc329033'/>
<enum-decl name='zfeature_type' id='c4fa2355'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFEATURE_TYPE_BOOLEAN' value='0'/>
<enumerator name='ZFEATURE_TYPE_UINT64_ARRAY' value='1'/>
<enumerator name='ZFEATURE_NUM_TYPES' value='2'/>
</enum-decl>
<typedef-decl name='zfeature_type_t' type-id='c4fa2355' id='732d2bb2'/>
<class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='1178d146'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fi_feature' type-id='d6618c78' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fi_uname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fi_guid' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='fi_desc' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fi_flags' type-id='fc329033' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='fi_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='fi_type' type-id='732d2bb2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='fi_depends' type-id='1acff326' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfeature_info_t' type-id='1178d146' id='83f29ca2'/>
<qualified-type-def type-id='d6618c78' const='yes' id='81a65028'/>
<pointer-type-def type-id='81a65028' size-in-bits='64' id='1acff326'/>
<var-decl name='spa_feature_table' type-id='d96379d0' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
<var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
<function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
<parameter type-id='d6618c78' name='fid'/>
<parameter type-id='d6618c78' name='check'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mod_supported'>
<parameter type-id='80f4b756' name='scope'/>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_comutil.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='80f4b756' size-in-bits='2624' id='ef31fedf'>
<subrange length='41' type-id='7359adad' id='cb834f44'/>
</array-type-def>
<pointer-type-def type-id='8f92235e' size-in-bits='64' id='90421557'/>
<function-decl name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<parameter type-id='90421557'/>
<return type-id='95e97e5e'/>
</function-decl>
<var-decl name='zfs_history_event_names' type-id='ef31fedf' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
<function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
<parameter type-id='5ce45b60' name='nv'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
<parameter type-id='5ce45b60' name='nv'/>
<parameter type-id='26a90f95' name='type'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
<parameter type-id='95e97e5e' name='spa_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
<parameter type-id='95e97e5e' name='zpl_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_deleg.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='f3f851ad' size-in-bits='4096' id='3dd2cc5f'>
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
</array-type-def>
<array-type-def dimensions='1' type-id='f3f851ad' size-in-bits='infinite' id='bc4e5d90'>
<subrange length='infinite' id='031f2035'/>
</array-type-def>
<enum-decl name='zfs_deleg_who_type_t' naming-typedef-id='36d4bd5a' id='b5fa5816'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_DELEG_WHO_UNKNOWN' value='0'/>
<enumerator name='ZFS_DELEG_USER' value='117'/>
<enumerator name='ZFS_DELEG_USER_SETS' value='85'/>
<enumerator name='ZFS_DELEG_GROUP' value='103'/>
<enumerator name='ZFS_DELEG_GROUP_SETS' value='71'/>
<enumerator name='ZFS_DELEG_EVERYONE' value='101'/>
<enumerator name='ZFS_DELEG_EVERYONE_SETS' value='69'/>
<enumerator name='ZFS_DELEG_CREATE' value='99'/>
<enumerator name='ZFS_DELEG_CREATE_SETS' value='67'/>
<enumerator name='ZFS_DELEG_NAMED_SET' value='115'/>
<enumerator name='ZFS_DELEG_NAMED_SET_SETS' value='83'/>
</enum-decl>
<typedef-decl name='zfs_deleg_who_type_t' type-id='b5fa5816' id='36d4bd5a'/>
<enum-decl name='zfs_deleg_note_t' naming-typedef-id='4613c173' id='729d4547'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_DELEG_NOTE_CREATE' value='0'/>
<enumerator name='ZFS_DELEG_NOTE_DESTROY' value='1'/>
<enumerator name='ZFS_DELEG_NOTE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_DELEG_NOTE_ROLLBACK' value='3'/>
<enumerator name='ZFS_DELEG_NOTE_CLONE' value='4'/>
<enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
<enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
<enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
<enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
<enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
<enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
<enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
<enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
<enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
<enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
<enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
<enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
<enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
<enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
<enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
<enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
<enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
<enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
<enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
<enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
</enum-decl>
<typedef-decl name='zfs_deleg_note_t' type-id='729d4547' id='4613c173'/>
<class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='5aa05c1f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='z_perm' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='z_note' type-id='4613c173' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_deleg_perm_tab_t' type-id='5aa05c1f' id='f3f851ad'/>
<var-decl name='zfs_deleg_perm_tab' type-id='bc4e5d90' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
<function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
<parameter type-id='80f4b756' name='perm'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
<parameter type-id='5ce45b60' name='nvp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_deleg_whokey' mangled-name='zfs_deleg_whokey' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_whokey'>
<parameter type-id='26a90f95' name='attr'/>
<parameter type-id='36d4bd5a' name='type'/>
<parameter type-id='a84c031d' name='inheritchr'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='48b5725f'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_fletcher.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='90dbb6d6' size-in-bits='2048' id='16582e69'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='8240361c' size-in-bits='1024' id='481f90b1'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='7c1ab40c' size-in-bits='512' id='cbd91ec1'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='6d059eaa' size-in-bits='1024' id='729b6ebb'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<enum-decl name='zio_byteorder_t' naming-typedef-id='595a65ec' id='fc861be0'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZIO_CHECKSUM_NATIVE' value='0'/>
<enumerator name='ZIO_CHECKSUM_BYTESWAP' value='1'/>
</enum-decl>
<typedef-decl name='zio_byteorder_t' type-id='fc861be0' id='595a65ec'/>
<class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='4bf4b004'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='acd_byteorder' type-id='595a65ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='acd_ctx' type-id='0f7df99e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='acd_zcp' type-id='c24fc2ee' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='acd_private' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_abd_checksum_data_t' type-id='4bf4b004' id='74e39470'/>
<typedef-decl name='zio_abd_checksum_init_t' type-id='a5444274' id='029a8ebe'/>
<typedef-decl name='zio_abd_checksum_fini_t' type-id='a5444274' id='d6fd5c6c'/>
<typedef-decl name='zio_abd_checksum_iter_t' type-id='f4a1892e' id='cefa0f4a'/>
<class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='aa14691a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='acf_init' type-id='0bcca125' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='acf_fini' type-id='bfe36153' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='acf_iter' type-id='1e276399' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_abd_checksum_func_t' type-id='3f8e8d11' id='c2eb138a'/>
<class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='28efb250'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_superscalar_t' type-id='28efb250' id='6d059eaa'/>
<class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='acd4019a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_sse_t' type-id='acd4019a' id='7c1ab40c'/>
<class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='8c208dfa'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_avx_t' type-id='8c208dfa' id='8240361c'/>
<class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='c6d0c382'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='v' type-id='c5d13f42' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_fletcher_avx512_t' type-id='c6d0c382' id='90dbb6d6'/>
<union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='1f951ade'>
<data-member access='public'>
<var-decl name='scalar' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='superscalar' type-id='729b6ebb' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sse' type-id='cbd91ec1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='avx' type-id='481f90b1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='avx512' type-id='16582e69' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='fletcher_4_ctx_t' type-id='1f951ade' id='4b675395'/>
<qualified-type-def type-id='aa14691a' const='yes' id='3f8e8d11'/>
<pointer-type-def type-id='4b675395' size-in-bits='64' id='0f7df99e'/>
<qualified-type-def type-id='8f92235e' volatile='yes' id='430e0681'/>
<pointer-type-def type-id='430e0681' size-in-bits='64' id='3a147f31'/>
<pointer-type-def type-id='74e39470' size-in-bits='64' id='eefe7427'/>
<pointer-type-def type-id='d6fd5c6c' size-in-bits='64' id='bfe36153'/>
<pointer-type-def type-id='029a8ebe' size-in-bits='64' id='0bcca125'/>
<pointer-type-def type-id='cefa0f4a' size-in-bits='64' id='1e276399'/>
<var-decl name='fletcher_4_abd_ops' type-id='c2eb138a' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
<function-decl name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3a147f31'/>
<parameter type-id='8f92235e'/>
<return type-id='8f92235e'/>
</function-decl>
<function-decl name='membar_producer' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='b59d7dce' name='size'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_2_byteswap' mangled-name='fletcher_2_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
<parameter type-id='80f4b756' name='val'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_byteswap' mangled-name='fletcher_4_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_byteswap'>
<parameter type-id='eaa32e2f' name='buf'/>
<parameter type-id='9c313c2d' name='size'/>
<parameter type-id='eaa32e2f' name='ctx_template'/>
<parameter type-id='c24fc2ee' name='zcp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-type size-in-bits='64' id='f4a1892e'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='a5444274'>
<parameter type-id='eefe7427'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_fletcher_avx512.c' language='LANG_C99'>
<typedef-decl name='fletcher_4_init_f' type-id='173aa527' id='b9ae1656'/>
<typedef-decl name='fletcher_4_fini_f' type-id='0ad5b8a8' id='c4c1f4fc'/>
<typedef-decl name='fletcher_4_compute_f' type-id='38147eff' id='ad1dc4cb'/>
<class-decl name='fletcher_4_func' size-in-bits='512' is-struct='yes' visibility='default' id='57f479a0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='init_native' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fini_native' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='compute_native' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='init_byteswap' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fini_byteswap' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='compute_byteswap' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='valid' type-id='297d38bc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='name' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='fletcher_4_ops_t' type-id='57f479a0' id='eba91718'/>
<qualified-type-def type-id='eba91718' const='yes' id='9eeabdc8'/>
<pointer-type-def type-id='e9e61702' size-in-bits='64' id='297d38bc'/>
<pointer-type-def type-id='fe40251b' size-in-bits='64' id='173aa527'/>
<pointer-type-def type-id='17fb1f83' size-in-bits='64' id='38147eff'/>
<pointer-type-def type-id='fb39e25e' size-in-bits='64' id='0ad5b8a8'/>
<var-decl name='fletcher_4_avx512f_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
<var-decl name='fletcher_4_avx512bw_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
<function-type size-in-bits='64' id='e9e61702'>
<return type-id='c19b74c3'/>
</function-type>
<function-type size-in-bits='64' id='fe40251b'>
<parameter type-id='0f7df99e'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='17fb1f83'>
<parameter type-id='0f7df99e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='fb39e25e'>
<parameter type-id='0f7df99e'/>
<parameter type-id='c24fc2ee'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_fletcher_intel.c' language='LANG_C99'>
<var-decl name='fletcher_4_avx2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_fletcher_sse.c' language='LANG_C99'>
<var-decl name='fletcher_4_sse2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
<var-decl name='fletcher_4_ssse3_ops' type-id='9eeabdc8' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar.c' language='LANG_C99'>
<var-decl name='fletcher_4_superscalar_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar4.c' language='LANG_C99'>
<var-decl name='fletcher_4_superscalar4_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_namecheck.c' language='LANG_C99'>
<var-decl name='zfs_max_dataset_nesting' type-id='95e97e5e' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
<function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='dataset_namecheck' mangled-name='dataset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='053457bd' name='why'/>
<parameter type-id='26a90f95' name='what'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zfs_prop.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='80f4b756' size-in-bits='768' id='35e4b367'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<var-decl name='zfs_userquota_prop_prefixes' type-id='35e4b367' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
<function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='31429eff'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='80f4b756'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c8bc397b'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_number'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c8bc397b'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='31429eff'/>
<parameter type-id='999701cc'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
<parameter type-id='95e97e5e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
<parameter type-id='95e97e5e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='2e45de5d'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_string_to_index' mangled-name='zfs_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_string_to_index'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
<parameter type-id='58603c44' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zpool_prop.c' language='LANG_C99'>
<function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='80f4b756' name='string'/>
<parameter type-id='5d6479ae' name='index'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
<parameter type-id='5d0c23fb' name='prop'/>
<parameter type-id='9c313c2d' name='seed'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
<parameter type-id='5d0c23fb' name='prop'/>
<return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='../../module/zcommon/zprop_common.c' language='LANG_C99'>
<function-decl name='__ctype_tolower_loc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='24f95ba5'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='libzfs_changelist.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<type-decl name='char' size-in-bits='8' id='a84c031d'/>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8192' id='b54ce520'>
<subrange length='1024' type-id='7359adad' id='c60446f8'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
<subrange length='20' type-id='7359adad' id='fdca39cf'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2048' id='d1617432'>
<subrange length='256' type-id='7359adad' id='36e5b9fa'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
<subrange length='40' type-id='7359adad' id='8f80b239'/>
</array-type-def>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='b48d2441'/>
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<class-decl name='uu_avl_walk' is-struct='yes' visibility='default' is-declaration-only='yes' id='e70a39e3'/>
<type-decl name='int' size-in-bits='32' id='95e97e5e'/>
<type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
<type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
<type-decl name='short int' size-in-bits='16' id='a2185560'/>
<type-decl name='signed char' size-in-bits='8' id='28577a57'/>
<array-type-def dimensions='1' type-id='e475ab95' size-in-bits='192' id='0ce65a8b'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
<type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
<type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
<type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
<type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
<type-decl name='void' id='48b5725f'/>
<typedef-decl name='uu_compare_fn_t' type-id='add6e811' id='40f93560'/>
<typedef-decl name='uu_avl_pool_t' type-id='12a530a8' id='7f84e390'/>
<typedef-decl name='uu_avl_t' type-id='4af029d1' id='bb7f0973'/>
<class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='f65f4326'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='uan_opaque' type-id='0ce65a8b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='uu_avl_node_t' type-id='f65f4326' id='73a65116'/>
<typedef-decl name='uu_avl_walk_t' type-id='e70a39e3' id='edd8457b'/>
<typedef-decl name='uu_avl_index_t' type-id='e475ab95' id='5d7f5fc8'/>
<typedef-decl name='zfs_handle_t' type-id='f6ee4445' id='775509eb'/>
<typedef-decl name='zpool_handle_t' type-id='67002a8a' id='b1efc708'/>
<typedef-decl name='libzfs_handle_t' type-id='c8a9d9d8' id='95942d0c'/>
<typedef-decl name='zfs_iter_f' type-id='5571cde4' id='d8e49ab9'/>
- <class-decl name='libzfs_handle' size-in-bits='20224' is-struct='yes' visibility='default' id='c8a9d9d8'>
+ <class-decl name='libzfs_handle' size-in-bits='20352' is-struct='yes' visibility='default' id='c8a9d9d8'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='libzfs_error' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='libzfs_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='libzfs_mnttab' type-id='822cd80b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='libzfs_pool_handles' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='libzfs_ns_avlpool' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='libzfs_ns_avl' type-id='a5c21a38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='libzfs_ns_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='libzfs_desc_active' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='libzfs_action' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8608'>
<var-decl name='libzfs_desc' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16800'>
<var-decl name='libzfs_printerr' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16832'>
<var-decl name='libzfs_storeerr' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16864'>
<var-decl name='libzfs_mnttab_enable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='16896'>
<var-decl name='libzfs_mnttab_cache_lock' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17216'>
<var-decl name='libzfs_mnttab_cache' type-id='f20fbd51' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17536'>
<var-decl name='libzfs_pool_iter' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='17568'>
<var-decl name='libzfs_chassis_id' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='19616'>
<var-decl name='libzfs_prop_debug' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='19648'>
<var-decl name='libzfs_urire' type-id='aca3bac8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='20160'>
<var-decl name='libzfs_max_nvlist' type-id='9c313c2d' visibility='default'/>
</data-member>
+ <data-member access='public' layout-offset-in-bits='20224'>
+ <var-decl name='libfetch' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='20288'>
+ <var-decl name='libfetch_load_error' type-id='26a90f95' visibility='default'/>
+ </data-member>
</class-decl>
<class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='f6ee4445'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zfs_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zpool_hdl' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zfs_name' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zfs_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2208'>
<var-decl name='zfs_head_type' type-id='2e45de5d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zfs_dmustats' type-id='b2c14f17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4544'>
<var-decl name='zfs_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4608'>
<var-decl name='zfs_user_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4672'>
<var-decl name='zfs_recvd_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4736'>
<var-decl name='zfs_mntcheck' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4800'>
<var-decl name='zfs_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='4864'>
<var-decl name='zfs_props_table' type-id='ae3e8ca6' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='67002a8a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zpool_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zpool_next' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zpool_name' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='zpool_state' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
<var-decl name='zpool_config_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
<var-decl name='zpool_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
<var-decl name='zpool_old_config' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
<var-decl name='zpool_props' type-id='5ce45b60' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2496'>
<var-decl name='zpool_start_block' type-id='804dc465' visibility='default'/>
</data-member>
</class-decl>
<enum-decl name='zfs_share_proto_t' naming-typedef-id='a7913f77' id='d34e3aab'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROTO_NFS' value='0'/>
<enumerator name='PROTO_SMB' value='1'/>
<enumerator name='PROTO_END' value='2'/>
</enum-decl>
<typedef-decl name='zfs_share_proto_t' type-id='d34e3aab' id='a7913f77'/>
<typedef-decl name='prop_changelist_t' type-id='d86edc51' id='eae6431d'/>
<typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
<class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='avl_root' type-id='bf311473' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='avl_size' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='098f0221'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dds_num_clones' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='dds_creation_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='dds_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='dds_type' type-id='230f1e16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='dds_is_snapshot' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='232'>
<var-decl name='dds_inconsistent' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='240'>
<var-decl name='dds_redacted' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='248'>
<var-decl name='dds_origin' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='dmu_objset_stats_t' type-id='098f0221' id='b2c14f17'/>
<enum-decl name='zfs_type_t' naming-typedef-id='2e45de5d' id='5d8f7321'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
<enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
<enumerator name='ZFS_TYPE_VOLUME' value='4'/>
<enumerator name='ZFS_TYPE_POOL' value='8'/>
<enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
</enum-decl>
<typedef-decl name='zfs_type_t' type-id='5d8f7321' id='2e45de5d'/>
<enum-decl name='dmu_objset_type' id='6b1b19f9'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DMU_OST_NONE' value='0'/>
<enumerator name='DMU_OST_META' value='1'/>
<enumerator name='DMU_OST_ZFS' value='2'/>
<enumerator name='DMU_OST_ZVOL' value='3'/>
<enumerator name='DMU_OST_OTHER' value='4'/>
<enumerator name='DMU_OST_ANY' value='5'/>
<enumerator name='DMU_OST_NUMTYPES' value='6'/>
</enum-decl>
<typedef-decl name='dmu_objset_type_t' type-id='6b1b19f9' id='230f1e16'/>
<enum-decl name='zfs_prop_t' naming-typedef-id='58603c44' id='4b000d60'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_CONT' value='-2'/>
<enumerator name='ZPROP_INVAL' value='-1'/>
<enumerator name='ZFS_PROP_TYPE' value='0'/>
<enumerator name='ZFS_PROP_CREATION' value='1'/>
<enumerator name='ZFS_PROP_USED' value='2'/>
<enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
<enumerator name='ZFS_PROP_REFERENCED' value='4'/>
<enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
<enumerator name='ZFS_PROP_MOUNTED' value='6'/>
<enumerator name='ZFS_PROP_ORIGIN' value='7'/>
<enumerator name='ZFS_PROP_QUOTA' value='8'/>
<enumerator name='ZFS_PROP_RESERVATION' value='9'/>
<enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
<enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
<enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
<enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
<enumerator name='ZFS_PROP_SHARENFS' value='14'/>
<enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
<enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
<enumerator name='ZFS_PROP_ATIME' value='17'/>
<enumerator name='ZFS_PROP_DEVICES' value='18'/>
<enumerator name='ZFS_PROP_EXEC' value='19'/>
<enumerator name='ZFS_PROP_SETUID' value='20'/>
<enumerator name='ZFS_PROP_READONLY' value='21'/>
<enumerator name='ZFS_PROP_ZONED' value='22'/>
<enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
<enumerator name='ZFS_PROP_ACLMODE' value='24'/>
<enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
<enumerator name='ZFS_PROP_CREATETXG' value='26'/>
<enumerator name='ZFS_PROP_NAME' value='27'/>
<enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
<enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
<enumerator name='ZFS_PROP_XATTR' value='30'/>
<enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
<enumerator name='ZFS_PROP_COPIES' value='32'/>
<enumerator name='ZFS_PROP_VERSION' value='33'/>
<enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
<enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
<enumerator name='ZFS_PROP_CASE' value='36'/>
<enumerator name='ZFS_PROP_VSCAN' value='37'/>
<enumerator name='ZFS_PROP_NBMAND' value='38'/>
<enumerator name='ZFS_PROP_SHARESMB' value='39'/>
<enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
<enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
<enumerator name='ZFS_PROP_GUID' value='42'/>
<enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
<enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
<enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
<enumerator name='ZFS_PROP_USEDDS' value='46'/>
<enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
<enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
<enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
<enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
<enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
<enumerator name='ZFS_PROP_USERREFS' value='52'/>
<enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
<enumerator name='ZFS_PROP_UNIQUE' value='54'/>
<enumerator name='ZFS_PROP_OBJSETID' value='55'/>
<enumerator name='ZFS_PROP_DEDUP' value='56'/>
<enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
<enumerator name='ZFS_PROP_SYNC' value='58'/>
<enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
<enumerator name='ZFS_PROP_REFRATIO' value='60'/>
<enumerator name='ZFS_PROP_WRITTEN' value='61'/>
<enumerator name='ZFS_PROP_CLONES' value='62'/>
<enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
<enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
<enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
<enumerator name='ZFS_PROP_VOLMODE' value='66'/>
<enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
<enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
<enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
<enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
<enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
<enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
<enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
<enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
<enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
<enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
<enumerator name='ZFS_PROP_RELATIME' value='77'/>
<enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
<enumerator name='ZFS_PROP_OVERLAY' value='79'/>
<enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
<enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
<enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
<enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
<enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
<enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
<enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
<enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
<enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
<enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
<enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
<enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
<enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
<enumerator name='ZFS_PROP_REDACTED' value='93'/>
<enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
<enumerator name='ZFS_NUM_PROPS' value='95'/>
</enum-decl>
<typedef-decl name='zfs_prop_t' type-id='4b000d60' id='58603c44'/>
<enum-decl name='zprop_source_t' naming-typedef-id='a2256d42' id='5903f80e'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_SRC_NONE' value='1'/>
<enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
<enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
<enumerator name='ZPROP_SRC_LOCAL' value='8'/>
<enumerator name='ZPROP_SRC_INHERITED' value='16'/>
<enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
</enum-decl>
<typedef-decl name='zprop_source_t' type-id='5903f80e' id='a2256d42'/>
<class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
<enum-decl name='boolean_t' naming-typedef-id='c19b74c3' id='f58c8277'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='B_FALSE' value='0'/>
<enumerator name='B_TRUE' value='1'/>
</enum-decl>
<typedef-decl name='boolean_t' type-id='f58c8277' id='c19b74c3'/>
<typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
<typedef-decl name='longlong_t' type-id='1eb56b1e' id='9b3ff54f'/>
<typedef-decl name='diskaddr_t' type-id='9b3ff54f' id='804dc465'/>
<typedef-decl name='zoneid_t' type-id='95e97e5e' id='4da03624'/>
<class-decl name='prop_changelist' size-in-bits='448' is-struct='yes' visibility='default' id='d86edc51'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cl_prop' type-id='58603c44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='cl_realprop' type-id='58603c44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='cl_shareprop' type-id='58603c44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='cl_pool' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='cl_tree' type-id='a5c21a38' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='cl_waslegacy' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='cl_allchildren' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='cl_alldependents' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='cl_mflags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='cl_gflags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='cl_haszonedchild' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='pthread_mutex_t' size-in-bits='320' naming-typedef-id='7a6844eb' visibility='default' id='70681f9b'>
<data-member access='public'>
<var-decl name='__data' type-id='4c734837' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__size' type-id='36c46961' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_mutex_t' type-id='70681f9b' id='7a6844eb'/>
<typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
<typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
<typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
<typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
<class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='__spins' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
<var-decl name='__elision' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
<typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
<typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
<typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
<typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
<typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
<typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
<typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
<typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
<class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
<var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
<var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
<var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
<var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
<var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
<var-decl name='_codecvt' type-id='570f8c59' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
<var-decl name='_wide_data' type-id='c65a1f29' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
<var-decl name='_freeres_list' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
<var-decl name='_freeres_buf' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
<var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
<var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
<var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__re_long_size_t' type-id='7359adad' id='ba516949'/>
<typedef-decl name='reg_syntax_t' type-id='7359adad' id='1b72c3b3'/>
<class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='19fc9a8c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='buffer' type-id='33976309' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='allocated' type-id='ba516949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='used' type-id='ba516949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='syntax' type-id='1b72c3b3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='fastmap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='translate' type-id='cf536864' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='re_nsub' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='can_be_null' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='449'>
<var-decl name='regs_allocated' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='451'>
<var-decl name='fastmap_accurate' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='452'>
<var-decl name='no_sub' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='453'>
<var-decl name='not_bol' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='454'>
<var-decl name='not_eol' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='455'>
<var-decl name='newline_anchor' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='regex_t' type-id='19fc9a8c' id='aca3bac8'/>
<typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
<typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
<pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
<pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
<pointer-type-def type-id='a4036571' size-in-bits='64' id='570f8c59'/>
<pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
<pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
<pointer-type-def type-id='79bd3751' size-in-bits='64' id='c65a1f29'/>
<pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
<pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
<pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
<pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
<qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
<pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
<qualified-type-def type-id='775509eb' const='yes' id='5eadf2db'/>
<pointer-type-def type-id='5eadf2db' size-in-bits='64' id='fcd57163'/>
<pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
<pointer-type-def type-id='cb9628fa' size-in-bits='64' id='5571cde4'/>
<pointer-type-def type-id='95942d0c' size-in-bits='64' id='b0382bb3'/>
<pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
<pointer-type-def type-id='eae6431d' size-in-bits='64' id='0d41d328'/>
<pointer-type-def type-id='b48d2441' size-in-bits='64' id='33976309'/>
<pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
<pointer-type-def type-id='002ac4a6' size-in-bits='64' id='cf536864'/>
<pointer-type-def type-id='5d7f5fc8' size-in-bits='64' id='813a2225'/>
<pointer-type-def type-id='73a65116' size-in-bits='64' id='2dc35b9d'/>
<pointer-type-def type-id='7f84e390' size-in-bits='64' id='de82c773'/>
<pointer-type-def type-id='bb7f0973' size-in-bits='64' id='a5c21a38'/>
<pointer-type-def type-id='edd8457b' size-in-bits='64' id='5842d146'/>
<pointer-type-def type-id='40f93560' size-in-bits='64' id='d502b39f'/>
<pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
<pointer-type-def type-id='775509eb' size-in-bits='64' id='9200a744'/>
<pointer-type-def type-id='a7913f77' size-in-bits='64' id='bf9c30ee'/>
<pointer-type-def type-id='b1efc708' size-in-bits='64' id='4c81de99'/>
<pointer-type-def type-id='a2256d42' size-in-bits='64' id='debc6aa3'/>
<class-decl name='_IO_codecvt' is-struct='yes' visibility='default' is-declaration-only='yes' id='a4036571'/>
<class-decl name='_IO_marker' is-struct='yes' visibility='default' is-declaration-only='yes' id='010ae0b9'/>
<class-decl name='_IO_wide_data' is-struct='yes' visibility='default' is-declaration-only='yes' id='79bd3751'/>
<class-decl name='re_dfa_t' is-struct='yes' visibility='default' is-declaration-only='yes' id='b48d2441'/>
<class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
<class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
<class-decl name='uu_avl_walk' is-struct='yes' visibility='default' is-declaration-only='yes' id='e70a39e3'/>
<function-decl name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='d502b39f'/>
<parameter type-id='8f92235e'/>
<return type-id='de82c773'/>
</function-decl>
<function-decl name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='de82c773'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='2dc35b9d'/>
<parameter type-id='de82c773'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='de82c773'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='8f92235e'/>
<return type-id='a5c21a38'/>
</function-decl>
<function-decl name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_last' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='8f92235e'/>
<return type-id='5842d146'/>
</function-decl>
<function-decl name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5842d146'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5842d146'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_find' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='813a2225'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='5d7f5fc8'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
<parameter type-id='9200a744'/>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_open'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_close'>
<parameter type-id='9200a744'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
<parameter type-id='fcd57163'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
<parameter type-id='9200a744'/>
<parameter type-id='58603c44'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='debc6aa3'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
<parameter type-id='9200a744'/>
<parameter type-id='58603c44'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
<parameter type-id='9200a744'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
<parameter type-id='9200a744'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
<parameter type-id='9200a744'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
<parameter type-id='9200a744'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
<parameter type-id='9200a744'/>
<parameter type-id='9b23c9ad'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
<parameter type-id='9200a744'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_nfs'>
<parameter type-id='9200a744'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_smb'>
<parameter type-id='9200a744'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_nfs'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_smb'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_commit_nfs_shares' mangled-name='zfs_commit_nfs_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_nfs_shares'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_commit_smb_shares' mangled-name='zfs_commit_smb_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_smb_shares'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_error' mangled-name='zfs_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_error'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_alloc' mangled-name='zfs_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_alloc'>
<parameter type-id='b0382bb3'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='remove_mountpoint' mangled-name='remove_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='remove_mountpoint'>
<parameter type-id='9200a744'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_unshare_proto' mangled-name='zfs_unshare_proto' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_proto'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='bf9c30ee'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_commit_proto' mangled-name='zfs_commit_proto' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_proto'>
<parameter type-id='bf9c30ee'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='getzoneid' visibility='default' binding='global' size-in-bits='64'>
<return type-id='4da03624'/>
</function-decl>
<function-decl name='strlcat' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='changelist_prefix' mangled-name='changelist_prefix' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_prefix'>
<parameter type-id='0d41d328' name='clp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='changelist_postfix' mangled-name='changelist_postfix' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_postfix'>
<parameter type-id='0d41d328' name='clp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='isa_child_of' mangled-name='isa_child_of' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='isa_child_of'>
<parameter type-id='80f4b756' name='dataset'/>
<parameter type-id='80f4b756' name='parent'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='changelist_rename' mangled-name='changelist_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_rename'>
<parameter type-id='0d41d328' name='clp'/>
<parameter type-id='80f4b756' name='src'/>
<parameter type-id='80f4b756' name='dst'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_unshare' mangled-name='changelist_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_unshare'>
<parameter type-id='0d41d328' name='clp'/>
<parameter type-id='bf9c30ee' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='changelist_haszonedchild' mangled-name='changelist_haszonedchild' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_haszonedchild'>
<parameter type-id='0d41d328' name='clp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='changelist_remove' mangled-name='changelist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_remove'>
<parameter type-id='0d41d328' name='clp'/>
<parameter type-id='80f4b756' name='name'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_free' mangled-name='changelist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_free'>
<parameter type-id='0d41d328' name='clp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='changelist_gather' mangled-name='changelist_gather' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='changelist_gather'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='95e97e5e' name='gather_flags'/>
<parameter type-id='95e97e5e' name='mnt_flags'/>
<return type-id='0d41d328'/>
</function-decl>
<function-decl name='free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strcmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strncmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strlen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-type size-in-bits='64' id='96ee24a5'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='add6e811'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='cb9628fa'>
<parameter type-id='9200a744'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_config.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32768' id='d16c6df4'>
<subrange length='4096' type-id='7359adad' id='bc1b5ddc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='65536' id='163f6aa5'>
<subrange length='8192' type-id='7359adad' id='c88f397d'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='128' id='c1c22e6c'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='24' id='d3490169'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<type-decl name='variadic parameter type' id='2c1145c5'/>
<typedef-decl name='zpool_iter_f' type-id='3aebb66f' id='fa476e62'/>
<enum-decl name='data_type_t' naming-typedef-id='8d0687d2' id='aeeae136'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
<enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
<enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
<enumerator name='DATA_TYPE_BYTE' value='2'/>
<enumerator name='DATA_TYPE_INT16' value='3'/>
<enumerator name='DATA_TYPE_UINT16' value='4'/>
<enumerator name='DATA_TYPE_INT32' value='5'/>
<enumerator name='DATA_TYPE_UINT32' value='6'/>
<enumerator name='DATA_TYPE_INT64' value='7'/>
<enumerator name='DATA_TYPE_UINT64' value='8'/>
<enumerator name='DATA_TYPE_STRING' value='9'/>
<enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
<enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
<enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
<enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
<enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
<enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
<enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
<enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
<enumerator name='DATA_TYPE_HRTIME' value='18'/>
<enumerator name='DATA_TYPE_NVLIST' value='19'/>
<enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
<enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
<enumerator name='DATA_TYPE_INT8' value='22'/>
<enumerator name='DATA_TYPE_UINT8' value='23'/>
<enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
<enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
<enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
<enumerator name='DATA_TYPE_DOUBLE' value='27'/>
</enum-decl>
<typedef-decl name='data_type_t' type-id='aeeae136' id='8d0687d2'/>
<class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='1c34e459'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='nvp_size' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='nvp_name_sz' type-id='23bd8cb5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
<var-decl name='nvp_reserve' type-id='23bd8cb5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='nvp_value_elem' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='nvp_type' type-id='8d0687d2' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='nvpair_t' type-id='1c34e459' id='57928edf'/>
<class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='09fcdc01'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_magic' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_versioninfo' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_creation_time' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_type' type-id='230f1e16' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='drr_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_fromguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_toname' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='3216f820'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zi_objset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zi_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zi_start' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zi_end' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='zi_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='zi_level' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='zi_error' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='zi_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='zi_freq' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='zi_failfast' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='zi_func' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2560'>
<var-decl name='zi_iotype' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2592'>
<var-decl name='zi_duration' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2624'>
<var-decl name='zi_timer' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2688'>
<var-decl name='zi_nlanes' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2752'>
<var-decl name='zi_cmd' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2784'>
<var-decl name='zi_dvas' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zinject_record_t' type-id='3216f820' id='a4301ca6'/>
<class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='feb6f2da'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='z_exportdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='z_sharedata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='z_sharetype' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='z_sharemax' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_share_t' type-id='feb6f2da' id='ee5cec36'/>
<class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='3522cd69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zc_name' type-id='d16c6df4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32768'>
<var-decl name='zc_nvlist_src' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32832'>
<var-decl name='zc_nvlist_src_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32896'>
<var-decl name='zc_nvlist_dst' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32960'>
<var-decl name='zc_nvlist_dst_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33024'>
<var-decl name='zc_nvlist_dst_filled' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33056'>
<var-decl name='zc_pad2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33088'>
<var-decl name='zc_history' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='33152'>
<var-decl name='zc_value' type-id='163f6aa5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='98688'>
<var-decl name='zc_string' type-id='d1617432' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100736'>
<var-decl name='zc_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100800'>
<var-decl name='zc_nvlist_conf' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100864'>
<var-decl name='zc_nvlist_conf_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100928'>
<var-decl name='zc_cookie' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='100992'>
<var-decl name='zc_objset_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101056'>
<var-decl name='zc_perm_action' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101120'>
<var-decl name='zc_history_len' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101184'>
<var-decl name='zc_history_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101248'>
<var-decl name='zc_obj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101312'>
<var-decl name='zc_iflags' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101376'>
<var-decl name='zc_share' type-id='ee5cec36' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='101632'>
<var-decl name='zc_objset_stats' type-id='b2c14f17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='103936'>
<var-decl name='zc_begin_record' type-id='09fcdc01' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='106368'>
<var-decl name='zc_inject_record' type-id='a4301ca6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109184'>
<var-decl name='zc_defer_destroy' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109216'>
<var-decl name='zc_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109248'>
<var-decl name='zc_action_handle' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109312'>
<var-decl name='zc_cleanup_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109344'>
<var-decl name='zc_simple' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109352'>
<var-decl name='zc_pad' type-id='d3490169' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109376'>
<var-decl name='zc_sendobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109440'>
<var-decl name='zc_fromobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109504'>
<var-decl name='zc_createtxg' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109568'>
<var-decl name='zc_stat' type-id='0371a9c7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='109888'>
<var-decl name='zc_zoneid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_cmd_t' type-id='3522cd69' id='a5559cdd'/>
<class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='6417f0b9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zs_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zs_mode' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zs_links' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zs_ctime' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zfs_stat_t' type-id='6417f0b9' id='0371a9c7'/>
<typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
<typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
<pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
<pointer-type-def type-id='2bce87e3' size-in-bits='64' id='3aebb66f'/>
<pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
<pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
<pointer-type-def type-id='57928edf' size-in-bits='64' id='3fa542f0'/>
<pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
<pointer-type-def type-id='3522cd69' size-in-bits='64' id='b65f7fd1'/>
<pointer-type-def type-id='a5559cdd' size-in-bits='64' id='e4ec4540'/>
<pointer-type-def type-id='4c81de99' size-in-bits='64' id='237193c9'/>
<function-decl name='uu_avl_first' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_next' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='eaa32e2f'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a5c21a38'/>
<parameter type-id='63e171df'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='b65f7fd1'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_strdup' mangled-name='zfs_strdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strdup'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='no_memory' mangled-name='no_memory' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='no_memory'>
<parameter type-id='b0382bb3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_alloc_dst_nvlist' mangled-name='zcmd_alloc_dst_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_alloc_dst_nvlist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_expand_dst_nvlist' mangled-name='zcmd_expand_dst_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_expand_dst_nvlist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_read_dst_nvlist' mangled-name='zcmd_read_dst_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_read_dst_nvlist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_free_nvlists' mangled-name='zcmd_free_nvlists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_free_nvlists'>
<parameter type-id='e4ec4540'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='make_dataset_handle' mangled-name='make_dataset_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='make_dataset_handle'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zpool_open_silent' mangled-name='zpool_open_silent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_silent'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='237193c9'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='3fa542f0'/>
<return type-id='3fa542f0'/>
</function-decl>
<function-decl name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='namespace_clear' mangled-name='namespace_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='namespace_clear'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='oldconfig'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='37e3bd22' name='missing'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
<parameter type-id='80f4b756' name='poolname'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='fa476e62' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
<return type-id='7292109c'/>
</function-decl>
<function-decl name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='getenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strcpy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strchr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-type size-in-bits='64' id='2bce87e3'>
<parameter type-id='4c81de99'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_crypto.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='fb7c6451' size-in-bits='256' id='64177143'>
<subrange length='32' type-id='7359adad' id='ae5bde82'/>
</array-type-def>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='896' id='47394ee0'>
<subrange length='28' type-id='7359adad' id='3db583d7'/>
</array-type-def>
<array-type-def dimensions='1' type-id='7359adad' size-in-bits='1024' id='d2baa450'>
<subrange length='16' type-id='7359adad' id='848d0938'/>
</array-type-def>
<enum-decl name='zpool_prop_t' naming-typedef-id='5d0c23fb' id='af1ba157'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
<enumerator name='ZPOOL_PROP_NAME' value='0'/>
<enumerator name='ZPOOL_PROP_SIZE' value='1'/>
<enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
<enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
<enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
<enumerator name='ZPOOL_PROP_GUID' value='5'/>
<enumerator name='ZPOOL_PROP_VERSION' value='6'/>
<enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
<enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
<enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
<enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
<enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
<enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
<enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
<enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
<enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
<enumerator name='ZPOOL_PROP_FREE' value='16'/>
<enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
<enumerator name='ZPOOL_PROP_READONLY' value='18'/>
<enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
<enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
<enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
<enumerator name='ZPOOL_PROP_FREEING' value='22'/>
<enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
<enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
<enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
<enumerator name='ZPOOL_PROP_TNAME' value='26'/>
<enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
<enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
<enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
<enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
<enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
<enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
<enumerator name='ZPOOL_NUM_PROPS' value='33'/>
</enum-decl>
<typedef-decl name='zpool_prop_t' type-id='af1ba157' id='5d0c23fb'/>
<typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
<class-decl name='sigaction' size-in-bits='1216' is-struct='yes' visibility='default' id='fe391c48'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__sigaction_handler' type-id='ac5ab598' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='sa_mask' type-id='b9c97942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1088'>
<var-decl name='sa_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
<var-decl name='sa_restorer' type-id='953b12f8' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='ac5ab598'>
<data-member access='public'>
<var-decl name='sa_handler' type-id='8cdd9566' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sa_sigaction' type-id='6e756877' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='termios' size-in-bits='480' is-struct='yes' visibility='default' id='ad55d2bc'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='c_iflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='c_oflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='c_cflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='c_lflag' type-id='241ce6f8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='c_line' type-id='fb7c6451' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='136'>
<var-decl name='c_cc' type-id='64177143' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='c_ispeed' type-id='6a8e8a14' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='c_ospeed' type-id='6a8e8a14' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='cc_t' type-id='002ac4a6' id='fb7c6451'/>
<typedef-decl name='speed_t' type-id='f0981eeb' id='6a8e8a14'/>
<typedef-decl name='tcflag_t' type-id='f0981eeb' id='241ce6f8'/>
<typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
<typedef-decl name='__pid_t' type-id='95e97e5e' id='3629bad8'/>
<typedef-decl name='__clock_t' type-id='bd54fe1a' id='4d66c6d7'/>
<typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
<class-decl name='__sigset_t' size-in-bits='1024' is-struct='yes' naming-typedef-id='b9c97942' visibility='default' id='2616147f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__val' type-id='d2baa450' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__sigset_t' type-id='2616147f' id='b9c97942'/>
<union-decl name='sigval' size-in-bits='64' visibility='default' id='a094b870'>
<data-member access='public'>
<var-decl name='sival_int' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='sival_ptr' type-id='eaa32e2f' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='__sigval_t' type-id='a094b870' id='eabacd01'/>
<class-decl name='siginfo_t' size-in-bits='1024' is-struct='yes' naming-typedef-id='cb681f62' visibility='default' id='d8149419'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_signo' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_errno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_code' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_sifields' type-id='ac5ab599' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__1' size-in-bits='896' is-anonymous='yes' visibility='default' id='ac5ab599'>
<data-member access='public'>
<var-decl name='_pad' type-id='47394ee0' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_kill' type-id='e7f43f72' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_timer' type-id='e7f43f73' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_rt' type-id='e7f43f74' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigchld' type-id='e7f43f75' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigfault' type-id='e7f43f76' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigpoll' type-id='e7f43f77' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_sigsys' type-id='e7f43f78' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__1' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f72'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_pid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__2' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f73'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_tid' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_overrun' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_sigval' type-id='eabacd01' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__3' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f74'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_pid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_sigval' type-id='eabacd01' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__4' size-in-bits='256' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f75'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_pid' type-id='3629bad8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='si_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_status' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='si_utime' type-id='4d66c6d7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='si_stime' type-id='4d66c6d7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__5' size-in-bits='256' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f76'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_addr' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_addr_lsb' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='_bounds' type-id='ac5ab59a' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='__anonymous_union__2' size-in-bits='128' is-anonymous='yes' visibility='default' id='ac5ab59a'>
<data-member access='public'>
<var-decl name='_addr_bnd' type-id='e7f43f79' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='_pkey' type-id='62f1140c' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='__anonymous_struct__6' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f79'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_lower' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_upper' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__7' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f77'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='si_band' type-id='bd54fe1a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='si_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='__anonymous_struct__8' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e7f43f78'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='_call_addr' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='_syscall' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='_arch' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='siginfo_t' type-id='d8149419' id='cb681f62'/>
<typedef-decl name='sigset_t' type-id='b9c97942' id='daf33c64'/>
<typedef-decl name='regoff_t' type-id='95e97e5e' id='54a2a2a8'/>
<class-decl name='regmatch_t' size-in-bits='64' is-struct='yes' naming-typedef-id='1b941664' visibility='default' id='4f932615'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='rm_so' type-id='54a2a2a8' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='rm_eo' type-id='54a2a2a8' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='regmatch_t' type-id='4f932615' id='1b941664'/>
<typedef-decl name='__sighandler_t' type-id='03347643' id='8cdd9566'/>
<typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
<qualified-type-def type-id='822cd80b' restrict='yes' id='e75a27e9'/>
<qualified-type-def type-id='9b23c9ad' restrict='yes' id='8c85230f'/>
<qualified-type-def type-id='80f4b756' restrict='yes' id='9d26089a'/>
<qualified-type-def type-id='aca3bac8' const='yes' id='2498fd78'/>
<pointer-type-def type-id='2498fd78' size-in-bits='64' id='eed6c816'/>
<qualified-type-def type-id='eed6c816' restrict='yes' id='a431a9da'/>
<qualified-type-def type-id='fe391c48' const='yes' id='14a93b33'/>
<pointer-type-def type-id='14a93b33' size-in-bits='64' id='9f68085b'/>
<qualified-type-def type-id='9f68085b' restrict='yes' id='e2a5e6f9'/>
<qualified-type-def type-id='ad55d2bc' const='yes' id='a46bf13f'/>
<pointer-type-def type-id='a46bf13f' size-in-bits='64' id='eaec840f'/>
<qualified-type-def type-id='002ac4a6' const='yes' id='ea86de29'/>
<pointer-type-def type-id='ea86de29' size-in-bits='64' id='354f7eb9'/>
<qualified-type-def type-id='8efea9e5' const='yes' id='3beb2af4'/>
<pointer-type-def type-id='3beb2af4' size-in-bits='64' id='31347b7a'/>
<pointer-type-def type-id='31347b7a' size-in-bits='64' id='c59e1ef0'/>
<pointer-type-def type-id='1b941664' size-in-bits='64' id='7e2979d5'/>
<qualified-type-def type-id='7e2979d5' restrict='yes' id='fc212857'/>
<pointer-type-def type-id='fe391c48' size-in-bits='64' id='568dd84e'/>
<qualified-type-def type-id='568dd84e' restrict='yes' id='3d8ee6f2'/>
<pointer-type-def type-id='cb681f62' size-in-bits='64' id='185869c1'/>
<pointer-type-def type-id='daf33c64' size-in-bits='64' id='9e80f729'/>
<pointer-type-def type-id='b59d7dce' size-in-bits='64' id='78c01427'/>
<qualified-type-def type-id='78c01427' restrict='yes' id='d19b2c25'/>
<pointer-type-def type-id='ad55d2bc' size-in-bits='64' id='665a4eda'/>
<pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
<pointer-type-def type-id='ae3e8ca6' size-in-bits='64' id='d8774064'/>
<pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
<pointer-type-def type-id='ee076206' size-in-bits='64' id='953b12f8'/>
<pointer-type-def type-id='f712e2b7' size-in-bits='64' id='03347643'/>
<pointer-type-def type-id='ef70d893' size-in-bits='64' id='6e756877'/>
<qualified-type-def type-id='eaa32e2f' restrict='yes' id='1b7446cd'/>
<function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
<parameter type-id='4c81de99'/>
<parameter type-id='5d0c23fb'/>
<parameter type-id='debc6aa3'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
<parameter type-id='9200a744'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9200a744'/>
<parameter type-id='4c81de99'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='80f4b756'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
<parameter type-id='58603c44'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
<parameter type-id='9200a744'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
<parameter type-id='9200a744'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_load_key' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_change_key' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_error_aux' mangled-name='zfs_error_aux' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_error_aux'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
<parameter type-id='80f4b756'/>
<return type-id='58603c44'/>
</function-decl>
<function-decl name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9b23c9ad'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='37e3bd22' name='is_encroot'/>
<parameter type-id='26a90f95' name='buf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='parent_name'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='5ce45b60' name='pool_props'/>
<parameter type-id='c19b74c3' name='stdin_available'/>
<parameter type-id='d8774064' name='wkeydata_out'/>
<parameter type-id='4dd26a40' name='wkeylen_out'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='9200a744' name='origin_zhp'/>
<parameter type-id='26a90f95' name='parent_name'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='fsname'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='noop'/>
<parameter type-id='26a90f95' name='alt_keylocation'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='raw_props'/>
<parameter type-id='c19b74c3' name='inheritkey'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__ctype_b_loc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='c59e1ef0'/>
</function-decl>
+ <function-decl name='dlopen' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='95e97e5e'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='dlsym' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='1b7446cd'/>
+ <parameter type-id='9d26089a'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='dlerror' visibility='default' binding='global' size-in-bits='64'>
+ <return type-id='26a90f95'/>
+ </function-decl>
<function-decl name='PKCS5_PBKDF2_HMAC_SHA1' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='354f7eb9'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='cf536864'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='regexec' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a431a9da'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='fc212857'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='kill' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3629bad8'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sigemptyset' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9e80f729'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sigaction' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='e2a5e6f9'/>
<parameter type-id='3d8ee6f2'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fclose' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fflush' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='fdopen' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='95e97e5e'/>
+ <parameter type-id='80f4b756'/>
+ <return type-id='822cd80b'/>
+ </function-decl>
<function-decl name='printf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='snprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='asprintf' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='8c85230f'/>
+ <parameter type-id='9d26089a'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='fputc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='__getdelim' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='d19b2c25'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='e75a27e9'/>
<return type-id='41060289'/>
</function-decl>
<function-decl name='fread' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='1b7446cd'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='e75a27e9'/>
<return type-id='b59d7dce'/>
</function-decl>
+ <function-decl name='rewind' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='822cd80b'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-decl name='ferror' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fileno' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='malloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='calloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='memcpy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
+ <function-decl name='strdup' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='80f4b756'/>
+ <return type-id='26a90f95'/>
+ </function-decl>
<function-decl name='strerror' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='tcgetattr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='665a4eda'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tcsetattr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaec840f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='close' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='read' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='getpid' visibility='default' binding='global' size-in-bits='64'>
<return type-id='3629bad8'/>
</function-decl>
<function-decl name='isatty' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='unlink' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='80f4b756'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-type size-in-bits='64' id='ee076206'>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='f712e2b7'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-type>
<function-type size-in-bits='64' id='ef70d893'>
<parameter type-id='95e97e5e'/>
<parameter type-id='185869c1'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_dataset.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32' id='8e0573fd'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='bd9b4291'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pl_prop' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pl_user_prop' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pl_next' type-id='9f1a1109' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pl_all' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pl_width' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pl_recvd_width' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='pl_fixed' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_list_t' type-id='bd9b4291' id='bdb8ac4f'/>
<class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='7aee5792'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='recursive' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1'>
<var-decl name='nounmount' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2'>
<var-decl name='forceunmount' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='renameflags_t' type-id='7aee5792' id='067170c2'/>
<typedef-decl name='zfs_userspace_cb_t' type-id='ca64ff60' id='16c5f410'/>
<enum-decl name='lzc_dataset_type' id='bc9887f1'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
<enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
</enum-decl>
<typedef-decl name='avl_index_t' type-id='e475ab95' id='fba6cb51'/>
<enum-decl name='zfs_userquota_prop_t' naming-typedef-id='279fde6a' id='5258d2f6'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_PROP_USERUSED' value='0'/>
<enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
<enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
<enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
<enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
<enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
<enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
<enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
<enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
<enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
<enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
<enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
<enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
</enum-decl>
<typedef-decl name='zfs_userquota_prop_t' type-id='5258d2f6' id='279fde6a'/>
<enum-decl name='zfs_wait_activity_t' naming-typedef-id='3024501a' id='527d5dc6'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
<enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
</enum-decl>
<typedef-decl name='zfs_wait_activity_t' type-id='527d5dc6' id='3024501a'/>
<enum-decl name='namecheck_err_t' naming-typedef-id='8e0af06e' id='f43bbcda'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
<enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
<enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
<enumerator name='NAME_ERR_INVALCHAR' value='3'/>
<enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
<enumerator name='NAME_ERR_NOLETTER' value='5'/>
<enumerator name='NAME_ERR_RESERVED' value='6'/>
<enumerator name='NAME_ERR_DISKLIKE' value='7'/>
<enumerator name='NAME_ERR_TOOLONG' value='8'/>
<enumerator name='NAME_ERR_SELF_REF' value='9'/>
<enumerator name='NAME_ERR_PARENT_REF' value='10'/>
<enumerator name='NAME_ERR_NO_AT' value='11'/>
<enumerator name='NAME_ERR_NO_POUND' value='12'/>
</enum-decl>
<typedef-decl name='namecheck_err_t' type-id='f43bbcda' id='8e0af06e'/>
<enum-decl name='zprop_type_t' naming-typedef-id='31429eff' id='87676253'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROP_TYPE_NUMBER' value='0'/>
<enumerator name='PROP_TYPE_STRING' value='1'/>
<enumerator name='PROP_TYPE_INDEX' value='2'/>
</enum-decl>
<typedef-decl name='zprop_type_t' type-id='87676253' id='31429eff'/>
<class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<union-decl name='pthread_mutexattr_t' size-in-bits='32' naming-typedef-id='8afd6070' visibility='default' id='7300eb00'>
<data-member access='public'>
<var-decl name='__size' type-id='8e0573fd' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='95e97e5e' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_mutexattr_t' type-id='7300eb00' id='8afd6070'/>
<typedef-decl name='int64_t' type-id='0c9942d2' id='9da381c4'/>
<typedef-decl name='__int64_t' type-id='bd54fe1a' id='0c9942d2'/>
<typedef-decl name='__gid_t' type-id='f0981eeb' id='d94ec6d9'/>
<typedef-decl name='__time_t' type-id='bd54fe1a' id='65eda9c0'/>
<class-decl name='tm' size-in-bits='448' is-struct='yes' visibility='default' id='dddf6ca2'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tm_sec' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='tm_min' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tm_hour' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='tm_mday' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='tm_mon' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='tm_year' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='tm_wday' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='tm_yday' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='tm_isdst' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='tm_gmtoff' type-id='bd54fe1a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='tm_zone' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='time_t' type-id='65eda9c0' id='c9d12d66'/>
<class-decl name='group' size-in-bits='256' is-struct='yes' visibility='default' id='01a1b934'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='gr_name' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='gr_passwd' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='gr_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='gr_mem' type-id='9b23c9ad' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='mntent' size-in-bits='320' is-struct='yes' visibility='default' id='56fe4a37'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_fsname' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_dir' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_type' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_opts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='mnt_freq' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='mnt_passno' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='passwd' size-in-bits='384' is-struct='yes' visibility='default' id='a63d15a3'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pw_name' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pw_passwd' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pw_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='pw_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pw_gecos' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pw_dir' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pw_shell' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='uid_t' type-id='cc5fcceb' id='354978ed'/>
<pointer-type-def type-id='fba6cb51' size-in-bits='64' id='32adbf30'/>
<pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
<qualified-type-def type-id='26a90f95' restrict='yes' id='266fe297'/>
<pointer-type-def type-id='80f4b756' size-in-bits='64' id='7d3cd834'/>
<qualified-type-def type-id='56fe4a37' const='yes' id='a75125ce'/>
<pointer-type-def type-id='a75125ce' size-in-bits='64' id='48bea5ec'/>
<qualified-type-def type-id='8afd6070' const='yes' id='1d853360'/>
<pointer-type-def type-id='1d853360' size-in-bits='64' id='c2afbd7e'/>
<qualified-type-def type-id='c9d12d66' const='yes' id='588b3216'/>
<pointer-type-def type-id='588b3216' size-in-bits='64' id='9f201474'/>
<qualified-type-def type-id='9f201474' restrict='yes' id='d6e2847c'/>
<qualified-type-def type-id='dddf6ca2' const='yes' id='e824a34f'/>
<pointer-type-def type-id='e824a34f' size-in-bits='64' id='d6ad37ff'/>
<qualified-type-def type-id='d6ad37ff' restrict='yes' id='f8c6051d'/>
<pointer-type-def type-id='01a1b934' size-in-bits='64' id='566b3f52'/>
<pointer-type-def type-id='7e291ce6' size-in-bits='64' id='ca64ff60'/>
<pointer-type-def type-id='9da381c4' size-in-bits='64' id='cb785ebf'/>
<pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
<pointer-type-def type-id='8e0af06e' size-in-bits='64' id='053457bd'/>
<pointer-type-def type-id='857bb57e' size-in-bits='64' id='75be733c'/>
<pointer-type-def type-id='a63d15a3' size-in-bits='64' id='a195f4a3'/>
<pointer-type-def type-id='7a6844eb' size-in-bits='64' id='18c91f9e'/>
<pointer-type-def type-id='dddf6ca2' size-in-bits='64' id='d915a820'/>
<qualified-type-def type-id='d915a820' restrict='yes' id='f099ad08'/>
<pointer-type-def type-id='5d6479ae' size-in-bits='64' id='892b4acc'/>
<pointer-type-def type-id='bd9b4291' size-in-bits='64' id='9f1a1109'/>
<pointer-type-def type-id='bdb8ac4f' size-in-bits='64' id='3a9b2288'/>
<pointer-type-def type-id='3a9b2288' size-in-bits='64' id='e4378506'/>
<function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
<parameter type-id='4c81de99'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
<parameter type-id='4c81de99'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
<parameter type-id='4c81de99'/>
<parameter type-id='5d0c23fb'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='debc6aa3'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
<parameter type-id='58603c44'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
<parameter type-id='58603c44'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
<parameter type-id='4c81de99'/>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
<parameter type-id='9200a744'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
<parameter type-id='9200a744'/>
<parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
<parameter type-id='9200a744'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_commit_all_shares' mangled-name='zfs_commit_all_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_all_shares'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='bc9887f1'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_clone' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_promote' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_hold' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_release' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_exists' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='3024501a'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_error_fmt' mangled-name='zfs_error_fmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_error_fmt'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_standard_error_fmt' mangled-name='zfs_standard_error_fmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error_fmt'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_setprop_error' mangled-name='zfs_setprop_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_setprop_error'>
<parameter type-id='b0382bb3'/>
<parameter type-id='58603c44'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='26a90f95'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_parse_value' mangled-name='zprop_parse_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_parse_value'>
<parameter type-id='b0382bb3'/>
<parameter type-id='3fa542f0'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='9b23c9ad'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_expand_list' mangled-name='zprop_expand_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_expand_list'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4378506'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zcmd_write_src_nvlist' mangled-name='zcmd_write_src_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_write_src_nvlist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='5ce45b60'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_name_valid' mangled-name='zpool_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_valid'>
<parameter type-id='b0382bb3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_parse_options' mangled-name='zfs_parse_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_options'>
<parameter type-id='26a90f95'/>
<parameter type-id='a7913f77'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9c313c2d'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9c313c2d'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<parameter type-id='585e1de9'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_find' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='32adbf30'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_add' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_numnodes' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<return type-id='ee1f298e'/>
</function-decl>
<function-decl name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<parameter type-id='63e171df'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
<parameter type-id='58603c44'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
<parameter type-id='58603c44'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='c19b74c3'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='857bb57e'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='78c01427'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='9b23c9ad'/>
<parameter type-id='78c01427'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='857bb57e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='8d0687d2'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='cb785ebf'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='892b4acc'/>
<parameter type-id='4dd26a40'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='75be733c'/>
<parameter type-id='4dd26a40'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='8d0687d2'/>
</function-decl>
<function-decl name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<parameter type-id='9b23c9ad'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='3ff5601b'/>
</function-decl>
<function-decl name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_nestcheck'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
<parameter type-id='58603c44'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='getmntany' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<parameter type-id='9d424d31'/>
<parameter type-id='9d424d31'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<parameter type-id='9d424d31'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_validate_name' mangled-name='zfs_validate_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_validate_name'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='95e97e5e' name='type'/>
<parameter type-id='c19b74c3' name='modifying'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='make_dataset_handle_zc' mangled-name='make_dataset_handle_zc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='make_dataset_handle_zc'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='e4ec4540' name='zc'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='make_dataset_simple_handle_zc' mangled-name='make_dataset_simple_handle_zc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='make_dataset_simple_handle_zc'>
<parameter type-id='9200a744' name='pzhp'/>
<parameter type-id='e4ec4540' name='zc'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
<parameter type-id='80f4b756' name='path'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='make_bookmark_handle' mangled-name='make_bookmark_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='make_bookmark_handle'>
<parameter type-id='9200a744' name='parent'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5ce45b60' name='bmark_props'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='c19b74c3' name='enable'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_find'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<parameter type-id='9d424d31' name='entry'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='special'/>
<parameter type-id='80f4b756' name='mountp'/>
<parameter type-id='80f4b756' name='mntopts'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='fsname'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='7292109c' name='spa_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='c19b74c3' name='received'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='9b23c9ad' name='source'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='b59d7dce' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='58603c44' name='prop'/>
<parameter type-id='5d6479ae' name='value'/>
<parameter type-id='debc6aa3' name='src'/>
<parameter type-id='26a90f95' name='statbuf'/>
<parameter type-id='b59d7dce' name='statlen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='5d6479ae' name='propvalue'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='95e97e5e' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='5d6479ae' name='propvalue'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='26a90f95' name='propbuf'/>
<parameter type-id='95e97e5e' name='proplen'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
<parameter type-id='fcd57163' name='zhp'/>
<return type-id='2e45de5d'/>
</function-decl>
<function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='types'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='create_parents' mangled-name='create_parents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='create_parents'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='target'/>
<parameter type-id='95e97e5e' name='prefixlen'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='type'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='26a90f95' name='snapname'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<parameter type-id='c19b74c3' name='defer'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='snaps'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='c19b74c3' name='recursive'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9200a744' name='snap'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='target'/>
<parameter type-id='067170c2' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='e4378506' name='plp'/>
<parameter type-id='c19b74c3' name='received'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='ae3e8ca6' name='props'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='resource'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='resource'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='dataset'/>
<parameter type-id='26a90f95' name='path'/>
<parameter type-id='26a90f95' name='oldname'/>
<parameter type-id='26a90f95' name='newname'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='279fde6a' name='type'/>
<parameter type-id='16c5f410' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='snapname'/>
<parameter type-id='80f4b756' name='tag'/>
<parameter type-id='c19b74c3' name='recursive'/>
<parameter type-id='95e97e5e' name='cleanup_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='cleanup_fd'/>
<parameter type-id='5ce45b60' name='holds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='snapname'/>
<parameter type-id='80f4b756' name='tag'/>
<parameter type-id='c19b74c3' name='recursive'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='857bb57e' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='un'/>
<parameter type-id='5ce45b60' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='857bb57e' name='nvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
<parameter type-id='4c81de99' name='zph'/>
<parameter type-id='9c313c2d' name='volsize'/>
<parameter type-id='5ce45b60' name='props'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='3024501a' name='activity'/>
<parameter type-id='37e3bd22' name='missing'/>
<parameter type-id='37e3bd22' name='waited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getgrnam' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='566b3f52'/>
</function-decl>
<function-decl name='hasmntopt' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='48bea5ec'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<parameter type-id='c2afbd7e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='18c91f9e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='getpwnam' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='a195f4a3'/>
</function-decl>
<function-decl name='fprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='9d26089a'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='asprintf' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='8c85230f'/>
- <parameter type-id='9d26089a'/>
- <parameter is-variadic='yes'/>
- <return type-id='95e97e5e'/>
- </function-decl>
<function-decl name='strtol' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<return type-id='bd54fe1a'/>
</function-decl>
<function-decl name='strtoul' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<return type-id='7359adad'/>
</function-decl>
<function-decl name='abort' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strncpy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='26a90f95'/>
</function-decl>
- <function-decl name='strdup' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='80f4b756'/>
- <return type-id='26a90f95'/>
- </function-decl>
<function-decl name='strrchr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strcspn' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='strstr' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strsep' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='9d26089a'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='ioctl' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='7359adad'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strftime' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='9d26089a'/>
<parameter type-id='f8c6051d'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='localtime_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='d6e2847c'/>
<parameter type-id='f099ad08'/>
<return type-id='d915a820'/>
</function-decl>
<function-type size-in-bits='64' id='7e291ce6'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='80f4b756'/>
<parameter type-id='354978ed'/>
<parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_diff.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='448' id='6093ff7c'>
<subrange length='56' type-id='7359adad' id='f8137894'/>
</array-type-def>
<class-decl name='differ_info' size-in-bits='9024' is-struct='yes' visibility='default' id='d41965ee'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zhp' type-id='9200a744' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='fromsnap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='frommnt' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='tosnap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='tomnt' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='ds' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='dsmnt' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='tmpsnap' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='errbuf' type-id='b54ce520' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8704'>
<var-decl name='isclone' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8736'>
<var-decl name='scripted' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8768'>
<var-decl name='classify' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8800'>
<var-decl name='timestamped' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8832'>
<var-decl name='shares' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8896'>
<var-decl name='zerr' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8928'>
<var-decl name='cleanupfd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8960'>
<var-decl name='outputfd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='8992'>
<var-decl name='datafd' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='differ_info_t' type-id='d41965ee' id='e8525f0e'/>
<typedef-decl name='pthread_t' type-id='7359adad' id='4051f5e7'/>
<union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='b63afacd'>
<data-member access='public'>
<var-decl name='__size' type-id='6093ff7c' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
<typedef-decl name='pthread_attr_t' type-id='b63afacd' id='7d8569fd'/>
<qualified-type-def type-id='7d8569fd' const='yes' id='e06dee2d'/>
<pointer-type-def type-id='e06dee2d' size-in-bits='64' id='540db505'/>
<qualified-type-def type-id='540db505' restrict='yes' id='e1815e87'/>
<pointer-type-def type-id='e8525f0e' size-in-bits='64' id='ee78f675'/>
<pointer-type-def type-id='4051f5e7' size-in-bits='64' id='e01b5462'/>
<qualified-type-def type-id='e01b5462' restrict='yes' id='cc338b26'/>
<pointer-type-def type-id='cd5d79f4' size-in-bits='64' id='5ad9edb6'/>
<function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9b23c9ad'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_asprintf' mangled-name='zfs_asprintf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_asprintf'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='find_shares_object' mangled-name='find_shares_object' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='find_shares_object'>
<parameter type-id='ee78f675'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_show_diffs' mangled-name='zfs_show_diffs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_show_diffs'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='fromsnap'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='cc338b26'/>
<parameter type-id='e1815e87'/>
<parameter type-id='5ad9edb6'/>
<parameter type-id='1b7446cd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_join' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4051f5e7'/>
<parameter type-id='63e171df'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4051f5e7'/>
<return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fdopen' visibility='default' binding='global' size-in-bits='64'>
- <parameter type-id='95e97e5e'/>
- <parameter type-id='80f4b756'/>
- <return type-id='822cd80b'/>
- </function-decl>
<function-decl name='pipe2' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='7292109c'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='cd5d79f4'>
<parameter type-id='eaa32e2f'/>
<return type-id='eaa32e2f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_import.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<typedef-decl name='refresh_config_func_t' type-id='29f040d2' id='b7c58eaa'/>
<typedef-decl name='pool_active_func_t' type-id='baa42fef' id='de5d1d8f'/>
<class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='8b092c69'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pco_refresh_config' type-id='e7c00489' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pco_pool_active' type-id='9eadf5e0' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='pool_config_ops_t' type-id='1a21babe' id='b1e62775'/>
<enum-decl name='pool_state' id='4871ac24'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_STATE_ACTIVE' value='0'/>
<enumerator name='POOL_STATE_EXPORTED' value='1'/>
<enumerator name='POOL_STATE_DESTROYED' value='2'/>
<enumerator name='POOL_STATE_SPARE' value='3'/>
<enumerator name='POOL_STATE_L2CACHE' value='4'/>
<enumerator name='POOL_STATE_UNINITIALIZED' value='5'/>
<enumerator name='POOL_STATE_UNAVAIL' value='6'/>
<enumerator name='POOL_STATE_POTENTIALLY_ACTIVE' value='7'/>
</enum-decl>
<typedef-decl name='pool_state_t' type-id='4871ac24' id='084a08a3'/>
<class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='0bbec9cd'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='st_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='st_blocks' type-id='4e711bf1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__dev_t' type-id='7359adad' id='35ed8932'/>
<typedef-decl name='__ino64_t' type-id='7359adad' id='71288a47'/>
<typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
<typedef-decl name='__nlink_t' type-id='7359adad' id='80f0b9df'/>
<typedef-decl name='__blksize_t' type-id='bd54fe1a' id='d3f10a7f'/>
<typedef-decl name='__blkcnt64_t' type-id='bd54fe1a' id='4e711bf1'/>
<typedef-decl name='__syscall_slong_t' type-id='bd54fe1a' id='03085adc'/>
<class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='a9c79a1f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='tv_sec' type-id='65eda9c0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='tv_nsec' type-id='03085adc' visibility='default'/>
</data-member>
</class-decl>
<qualified-type-def type-id='8b092c69' const='yes' id='1a21babe'/>
<pointer-type-def type-id='de5d1d8f' size-in-bits='64' id='9eadf5e0'/>
<pointer-type-def type-id='084a08a3' size-in-bits='64' id='b9ea57b8'/>
<pointer-type-def type-id='b7c58eaa' size-in-bits='64' id='e7c00489'/>
<pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
<function-decl name='zcmd_write_conf_nvlist' mangled-name='zcmd_write_conf_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zcmd_write_conf_nvlist'>
<parameter type-id='b0382bb3'/>
<parameter type-id='e4ec4540'/>
<parameter type-id='5ce45b60'/>
<return type-id='95e97e5e'/>
</function-decl>
<var-decl name='libzfs_config_ops' type-id='b1e62775' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
<function-decl name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='857bb57e'/>
<parameter type-id='7292109c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
<parameter type-id='95e97e5e' name='fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_in_use' mangled-name='zpool_in_use' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_in_use'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='b9ea57b8' name='state'/>
<parameter type-id='9b23c9ad' name='namestr'/>
<parameter type-id='37e3bd22' name='inuse'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='memset' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='fstat64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='62f7a03d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pread64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='724e4de6'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='pwrite64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='724e4de6'/>
<return type-id='79a0948f'/>
</function-decl>
<function-type size-in-bits='64' id='baa42fef'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-type>
<function-type size-in-bits='64' id='29f040d2'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='5ce45b60'/>
<return type-id='5ce45b60'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_iter.c' language='LANG_C99'>
<pointer-type-def type-id='b351119f' size-in-bits='64' id='716943c7'/>
<function-decl name='avl_first' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a3681dea'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='716943c7'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='95e97e5e'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='callback'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
<parameter type-id='9200a744' name='fs_zhp'/>
<parameter type-id='80f4b756' name='spec_orig'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='libzfs_mount.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='6028cbfe' size-in-bits='256' id='b39b9aa7'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<class-decl name='__dirstream' is-struct='yes' visibility='default' is-declaration-only='yes' id='20cd73f2'/>
<class-decl name='tpool' is-struct='yes' visibility='default' is-declaration-only='yes' id='88d1b7f9'/>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='64' id='e4266c7e'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='f1bd64e2' size-in-bits='384' id='b2c36c9f'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a7913f77' size-in-bits='64' alignment-in-bits='32' id='79c9b3ac'>
<subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a7913f77' size-in-bits='96' alignment-in-bits='32' id='7dc77b61'>
<subrange length='3' type-id='7359adad' id='56f209d2'/>
</array-type-def>
<class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='803dac95'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_handles' type-id='4507922a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='cb_alloc' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='cb_used' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='get_all_cb_t' type-id='803dac95' id='9b293607'/>
<enum-decl name='zfs_share_type_t' naming-typedef-id='7eb57c2d' id='5bc85791'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SHARED_NOT_SHARED' value='0'/>
<enumerator name='SHARED_NFS' value='2'/>
<enumerator name='SHARED_SMB' value='4'/>
</enum-decl>
<typedef-decl name='zfs_share_type_t' type-id='5bc85791' id='7eb57c2d'/>
<class-decl name='proto_table_t' size-in-bits='192' is-struct='yes' naming-typedef-id='f1bd64e2' visibility='default' id='f4c8e1ed'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='p_prop' type-id='58603c44' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='p_name' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='p_share_err' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='p_unshare_err' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='proto_table_t' type-id='f4c8e1ed' id='f1bd64e2'/>
<typedef-decl name='tpool_t' type-id='88d1b7f9' id='b1bbf10d'/>
<class-decl name='dirent64' size-in-bits='2240' is-struct='yes' visibility='default' id='5725d813'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='d_ino' type-id='71288a47' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='d_off' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='d_reclen' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='144'>
<var-decl name='d_type' type-id='002ac4a6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='152'>
<var-decl name='d_name' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='statfs64' size-in-bits='960' is-struct='yes' visibility='default' id='a2a6be1a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='f_type' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='f_bsize' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='f_blocks' type-id='95fe1a02' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='f_bfree' type-id='95fe1a02' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='f_bavail' type-id='95fe1a02' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='f_files' type-id='0c3a4dde' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='f_ffree' type-id='0c3a4dde' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='f_fsid' type-id='0f35d263' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='f_namelen' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='f_frsize' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='f_flags' type-id='6028cbfe' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='f_spare' type-id='b39b9aa7' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='stat' size-in-bits='1152' is-struct='yes' visibility='default' id='aafc373f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='st_ino' type-id='e43e523d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='st_blocks' type-id='dbc43803' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__ino_t' type-id='7359adad' id='e43e523d'/>
<class-decl name='__fsid_t' size-in-bits='64' is-struct='yes' naming-typedef-id='0f35d263' visibility='default' id='ea35c84a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='__val' type-id='e4266c7e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='__fsid_t' type-id='ea35c84a' id='0f35d263'/>
<typedef-decl name='__blkcnt_t' type-id='bd54fe1a' id='dbc43803'/>
<typedef-decl name='__fsblkcnt64_t' type-id='7359adad' id='95fe1a02'/>
<typedef-decl name='__fsfilcnt64_t' type-id='7359adad' id='0c3a4dde'/>
<typedef-decl name='__fsword_t' type-id='bd54fe1a' id='6028cbfe'/>
<typedef-decl name='DIR' type-id='20cd73f2' id='54a5d683'/>
<typedef-decl name='mode_t' type-id='e1c52942' id='d50d396c'/>
<typedef-decl name='__compar_fn_t' type-id='585e1de9' id='aba7edd8'/>
<pointer-type-def type-id='54a5d683' size-in-bits='64' id='f09217ba'/>
<pointer-type-def type-id='5725d813' size-in-bits='64' id='07b96073'/>
<pointer-type-def type-id='9b293607' size-in-bits='64' id='77bf1784'/>
<pointer-type-def type-id='7d8569fd' size-in-bits='64' id='7347a39e'/>
<pointer-type-def type-id='aafc373f' size-in-bits='64' id='4330df87'/>
<qualified-type-def type-id='4330df87' restrict='yes' id='73665405'/>
<pointer-type-def type-id='a2a6be1a' size-in-bits='64' id='7fd094c8'/>
<pointer-type-def type-id='b1bbf10d' size-in-bits='64' id='9cf59a50'/>
<pointer-type-def type-id='c5c76c9c' size-in-bits='64' id='b7f9d8e6'/>
<pointer-type-def type-id='9200a744' size-in-bits='64' id='4507922a'/>
<class-decl name='__dirstream' is-struct='yes' visibility='default' is-declaration-only='yes' id='20cd73f2'/>
<class-decl name='tpool' is-struct='yes' visibility='default' is-declaration-only='yes' id='88d1b7f9'/>
<function-decl name='zfs_realloc' mangled-name='zfs_realloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_realloc'>
<parameter type-id='b0382bb3'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<var-decl name='proto_table' type-id='b2c36c9f' mangled-name='proto_table' visibility='default' elf-symbol-id='proto_table'/>
<function-decl name='do_mount' mangled-name='do_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='do_mount'>
<parameter type-id='9200a744'/>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='do_unmount' mangled-name='do_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='do_unmount'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3502e3ff'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='7347a39e'/>
<return type-id='9cf59a50'/>
</function-decl>
<function-decl name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9cf59a50'/>
<parameter type-id='b7f9d8e6'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9cf59a50'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9cf59a50'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='mkdirp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='d50d396c'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_errorstr'>
<parameter type-id='95e97e5e'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_enable_share'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_disable_share'>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_commit_shares'>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_validate_shareopts'>
<parameter type-id='26a90f95'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<var-decl name='nfs_only' type-id='79c9b3ac' mangled-name='nfs_only' visibility='default' elf-symbol-id='nfs_only'/>
<var-decl name='smb_only' type-id='79c9b3ac' mangled-name='smb_only' visibility='default' elf-symbol-id='smb_only'/>
<var-decl name='share_all_proto' type-id='7dc77b61' mangled-name='share_all_proto' visibility='default' elf-symbol-id='share_all_proto'/>
<function-decl name='zfs_is_mountable' mangled-name='zfs_is_mountable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mountable'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='26a90f95' name='buf'/>
<parameter type-id='b59d7dce' name='buflen'/>
<parameter type-id='debc6aa3' name='source'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='options'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='unshare_one' mangled-name='unshare_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='unshare_one'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='a7913f77' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='is_shared' mangled-name='is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_shared'>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='a7913f77' name='proto'/>
<return type-id='7eb57c2d'/>
</function-decl>
<function-decl name='zfs_share_proto' mangled-name='zfs_share_proto' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_proto'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='bf9c30ee' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_is_shared_proto' mangled-name='zfs_is_shared_proto' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_proto'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9b23c9ad' name='where'/>
<parameter type-id='a7913f77' name='proto'/>
<return type-id='7eb57c2d'/>
</function-decl>
<function-decl name='zfs_is_shared_nfs' mangled-name='zfs_is_shared_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_nfs'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9b23c9ad' name='where'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_is_shared_smb' mangled-name='zfs_is_shared_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_smb'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='9b23c9ad' name='where'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
<parameter type-id='80f4b756' name='proto'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_unshareall_nfs' mangled-name='zfs_unshareall_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_nfs'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshareall_smb' mangled-name='zfs_unshareall_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_smb'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshareall_bypath' mangled-name='zfs_unshareall_bypath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bypath'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_unshareall_bytype' mangled-name='zfs_unshareall_bytype' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bytype'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mountpoint'/>
<parameter type-id='80f4b756' name='proto'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
<parameter type-id='77bf1784' name='cbp'/>
<parameter type-id='9200a744' name='zhp'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4507922a' name='handles'/>
<parameter type-id='b59d7dce' name='num_handles'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='c19b74c3' name='parallel'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='mntopts'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fdopendir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='f09217ba'/>
</function-decl>
<function-decl name='closedir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f09217ba'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='readdir64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f09217ba'/>
<return type-id='07b96073'/>
</function-decl>
<function-decl name='qsort' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='aba7edd8'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='statfs64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='7fd094c8'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='rmdir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='c5c76c9c'>
<parameter type-id='eaa32e2f'/>
<return type-id='48b5725f'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_pool.c' language='LANG_C99'>
<type-decl name='long long unsigned int' size-in-bits='64' id='3a47d82b'/>
<class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='dc01bf52'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='dryrun' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1'>
<var-decl name='import' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='name_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='splitflags_t' type-id='dc01bf52' id='325c1e34'/>
<class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='8ef58008'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='fullpool' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='secure' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='wait' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='rate' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='trimflags_t' type-id='8ef58008' id='a093cbb8'/>
<enum-decl name='zpool_status_t' naming-typedef-id='d3dd6294' id='5e770b40'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
<enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
<enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
<enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
<enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
<enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
<enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
<enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
<enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
<enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
<enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
<enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
<enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
<enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
<enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
<enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
<enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
<enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
<enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
<enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
<enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
<enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
<enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
<enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
<enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
<enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
<enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
<enumerator name='ZPOOL_STATUS_OK' value='32'/>
</enum-decl>
<typedef-decl name='zpool_status_t' type-id='5e770b40' id='d3dd6294'/>
<enum-decl name='zpool_compat_status_t' naming-typedef-id='901b78d1' id='20676925'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
<enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
<enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
<enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
<enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
</enum-decl>
<typedef-decl name='zpool_compat_status_t' type-id='20676925' id='901b78d1'/>
<class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zlp_rewind' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='zlp_maxmeta' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='zlp_maxdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='zlp_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zpool_load_policy_t' type-id='2f65b36f' id='d11b7617'/>
<enum-decl name='vdev_state' id='21566197'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
<enumerator name='VDEV_STATE_CLOSED' value='1'/>
<enumerator name='VDEV_STATE_OFFLINE' value='2'/>
<enumerator name='VDEV_STATE_REMOVED' value='3'/>
<enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
<enumerator name='VDEV_STATE_FAULTED' value='5'/>
<enumerator name='VDEV_STATE_DEGRADED' value='6'/>
<enumerator name='VDEV_STATE_HEALTHY' value='7'/>
</enum-decl>
<typedef-decl name='vdev_state_t' type-id='21566197' id='35acf840'/>
<enum-decl name='vdev_aux' id='7f5bcca4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='VDEV_AUX_NONE' value='0'/>
<enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
<enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
<enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
<enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
<enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
<enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
<enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
<enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
<enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
<enumerator name='VDEV_AUX_SPARED' value='10'/>
<enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
<enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
<enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
<enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
<enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
<enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
<enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
<enumerator name='VDEV_AUX_ACTIVE' value='18'/>
<enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
<enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
</enum-decl>
<typedef-decl name='vdev_aux_t' type-id='7f5bcca4' id='9d774e0b'/>
<enum-decl name='pool_scan_func' id='1b092565'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCAN_NONE' value='0'/>
<enumerator name='POOL_SCAN_SCRUB' value='1'/>
<enumerator name='POOL_SCAN_RESILVER' value='2'/>
<enumerator name='POOL_SCAN_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_scan_func_t' type-id='1b092565' id='7313fbe2'/>
<enum-decl name='pool_scrub_cmd' id='a1474cbd'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_SCRUB_NORMAL' value='0'/>
<enumerator name='POOL_SCRUB_PAUSE' value='1'/>
<enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
</enum-decl>
<typedef-decl name='pool_scrub_cmd_t' type-id='a1474cbd' id='b51cf3c2'/>
<enum-decl name='zpool_errata' id='d9abbf54'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
<enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
<enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
<enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
<enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
</enum-decl>
<typedef-decl name='zpool_errata_t' type-id='d9abbf54' id='688c495b'/>
<enum-decl name='pool_initialize_func' id='5c246ad4'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_INITIALIZE_START' value='0'/>
<enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
<enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
<enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_initialize_func_t' type-id='5c246ad4' id='7063e1ab'/>
<enum-decl name='pool_trim_func' id='54ed608a'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='POOL_TRIM_START' value='0'/>
<enumerator name='POOL_TRIM_CANCEL' value='1'/>
<enumerator name='POOL_TRIM_SUSPEND' value='2'/>
<enumerator name='POOL_TRIM_FUNCS' value='3'/>
</enum-decl>
<typedef-decl name='pool_trim_func_t' type-id='54ed608a' id='b1146b8d'/>
<enum-decl name='zpool_wait_activity_t' naming-typedef-id='73446457' id='849338e3'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
<enumerator name='ZPOOL_WAIT_FREE' value='1'/>
<enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
<enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
<enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
<enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
<enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
<enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
<enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
</enum-decl>
<typedef-decl name='zpool_wait_activity_t' type-id='849338e3' id='73446457'/>
<enum-decl name='spa_feature' id='33ecb627'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='SPA_FEATURE_NONE' value='-1'/>
<enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
<enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
<enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
<enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
<enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
<enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
<enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
<enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
<enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
<enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
<enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
<enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
<enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
<enumerator name='SPA_FEATURE_SHA512' value='13'/>
<enumerator name='SPA_FEATURE_SKEIN' value='14'/>
<enumerator name='SPA_FEATURE_EDONR' value='15'/>
<enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
<enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
<enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
<enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
<enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
<enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
<enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
<enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
<enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
<enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
<enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
<enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
<enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
<enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
<enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
<enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
<enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
<enumerator name='SPA_FEATURE_DRAID' value='33'/>
<enumerator name='SPA_FEATURES' value='34'/>
</enum-decl>
<typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
<qualified-type-def type-id='8e8d4be3' const='yes' id='693c3853'/>
<pointer-type-def type-id='693c3853' size-in-bits='64' id='22cce67b'/>
<pointer-type-def type-id='d6618c78' size-in-bits='64' id='a8425263'/>
<qualified-type-def type-id='62f7a03d' restrict='yes' id='f1cadedf'/>
<pointer-type-def type-id='a093cbb8' size-in-bits='64' id='b13f38c3'/>
<pointer-type-def type-id='35acf840' size-in-bits='64' id='17f3480d'/>
<pointer-type-def type-id='688c495b' size-in-bits='64' id='cec6f2e4'/>
<pointer-type-def type-id='d11b7617' size-in-bits='64' id='23432aaa'/>
<function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
<parameter type-id='4c81de99'/>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
<parameter type-id='5d0c23fb'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
<parameter type-id='4c81de99'/>
<parameter type-id='9b23c9ad'/>
<parameter type-id='cec6f2e4'/>
<return type-id='d3dd6294'/>
</function-decl>
<function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
<parameter type-id='5d0c23fb'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
<parameter type-id='5d0c23fb'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='lzc_initialize' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='7063e1ab'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_trim' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b1146b8d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_sync' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_reopen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_wait' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='73446457'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='73446457'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='37e3bd22'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='22cce67b'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_standard_error' mangled-name='zpool_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_standard_error'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_standard_error_fmt' mangled-name='zpool_standard_error_fmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_standard_error_fmt'>
<parameter type-id='b0382bb3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_relabel_disk' mangled-name='zpool_relabel_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_relabel_disk'>
<parameter type-id='b0382bb3'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='75be733c'/>
<parameter type-id='4dd26a40'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
<parameter type-id='80f4b756'/>
<return type-id='5d0c23fb'/>
</function-decl>
<function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_readonly'>
<parameter type-id='5d0c23fb'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
<parameter type-id='5d0c23fb'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
<parameter type-id='5d0c23fb'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='7d3cd834'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='857bb57e'/>
<parameter type-id='3502e3ff'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='9da381c4'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<return type-id='9da381c4'/>
</function-decl>
<function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_guid'>
<parameter type-id='80f4b756'/>
<parameter type-id='a8425263'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
<parameter type-id='80f4b756'/>
<parameter type-id='a8425263'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
<parameter type-id='5ce45b60'/>
<parameter type-id='23432aaa'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
<parameter type-id='80f4b756'/>
<parameter type-id='053457bd'/>
<parameter type-id='26a90f95'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
<parameter type-id='5d0c23fb'/>
<return type-id='31429eff'/>
</function-decl>
<function-decl name='get_system_hostid' visibility='default' binding='global' size-in-bits='64'>
<return type-id='7359adad'/>
</function-decl>
<function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
<parameter type-id='35acf840' name='state'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
<parameter type-id='084a08a3' name='state'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='propval'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='e4378506' name='plp'/>
<parameter type-id='c19b74c3' name='literal'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
<parameter type-id='80f4b756' name='name'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='pool'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='5ce45b60' name='fsprops'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='c19b74c3' name='force'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='log_str'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='95e97e5e' name='reason'/>
<parameter type-id='5ce45b60' name='config'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='80f4b756' name='newname'/>
<parameter type-id='26a90f95' name='altroot'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
<parameter type-id='5ce45b60' name='config'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='80f4b756' name='newname'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7063e1ab' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7063e1ab' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_trim' mangled-name='zpool_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_trim'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='b1146b8d' name='cmd_type'/>
<parameter type-id='5ce45b60' name='vds'/>
<parameter type-id='b13f38c3' name='trim_flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='7313fbe2' name='func'/>
<parameter type-id='b51cf3c2' name='cmd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='ppath'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='37e3bd22' name='avail_spare'/>
<parameter type-id='37e3bd22' name='l2cache'/>
<parameter type-id='37e3bd22' name='log'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zpool_get_physpath' mangled-name='zpool_get_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_physpath'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='26a90f95' name='physpath'/>
<parameter type-id='b59d7dce' name='phypath_size'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='9c313c2d'/>
</function-decl>
<function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='17f3480d' name='newstate'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='c19b74c3' name='istmp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<parameter type-id='9d774e0b' name='aux'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='old_disk'/>
<parameter type-id='80f4b756' name='new_disk'/>
<parameter type-id='5ce45b60' name='nvroot'/>
<parameter type-id='95e97e5e' name='replacing'/>
<parameter type-id='c19b74c3' name='rebuild'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='26a90f95' name='newname'/>
<parameter type-id='857bb57e' name='newroot'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='325c1e34' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5d6479ae' name='sizep'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='5ce45b60' name='rewindnvl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='guid'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
<parameter type-id='4c81de99' name='zhp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='5ce45b60' name='nv'/>
<parameter type-id='95e97e5e' name='name_flags'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nverrlistp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='new_version'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
<parameter type-id='95e97e5e' name='argc'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='26a90f95' name='string'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='message'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nvhisp'/>
<parameter type-id='5d6479ae' name='off'/>
<parameter type-id='37e3bd22' name='eof'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='857bb57e' name='nvp'/>
<parameter type-id='7292109c' name='dropped'/>
<parameter type-id='f0981eeb' name='flags'/>
<parameter type-id='95e97e5e' name='zevent_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='7292109c' name='count'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='9c313c2d' name='eid'/>
<parameter type-id='95e97e5e' name='zevent_fd'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='dsobj'/>
<parameter type-id='9c313c2d' name='obj'/>
<parameter type-id='26a90f95' name='pathname'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='9c313c2d' name='dsobj'/>
<parameter type-id='9c313c2d' name='obj'/>
<parameter type-id='26a90f95' name='pathname'/>
<parameter type-id='b59d7dce' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='73446457' name='activity'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='73446457' name='activity'/>
<parameter type-id='37e3bd22' name='missing'/>
<parameter type-id='37e3bd22' name='waited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='22cce67b' name='envmap'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='857bb57e' name='nvlp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
<parameter type-id='80f4b756' name='compat'/>
<parameter type-id='37e3bd22' name='features'/>
<parameter type-id='26a90f95' name='report'/>
<parameter type-id='b59d7dce' name='rlen'/>
<return type-id='901b78d1'/>
</function-decl>
<function-decl name='__xpg_basename' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strtoull' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<parameter type-id='95e97e5e'/>
<return type-id='3a47d82b'/>
</function-decl>
<function-decl name='realpath' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='266fe297'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='memcmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='266fe297'/>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strncasecmp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='munmap' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='stat64' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='f1cadedf'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='libzfs_sendrecv.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='2176' id='8c2bcad1'>
<subrange length='34' type-id='7359adad' id='6a6a7e00'/>
</array-type-def>
<array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='256' id='85c64d26'>
<subrange length='4' type-id='7359adad' id='16fe7105'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='96' id='fa8ef949'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='128' id='fa9986a5'>
<subrange length='16' type-id='7359adad' id='848d0938'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='40' id='0f4ddd0b'>
<subrange length='5' type-id='7359adad' id='53010e10'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='48' id='0f562bd0'>
<subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
<array-type-def dimensions='1' type-id='b96825af' size-in-bits='64' id='13339fda'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<class-decl name='sendflags' size-in-bits='544' is-struct='yes' visibility='default' id='f6aa15be'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='verbosity' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='replicate' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='skipmissing' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='doall' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='fromorigin' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='pad' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='props' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='parsable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='progress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='largeblock' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='embed_data' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='compress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='raw' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='backup' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='saved' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='sendflags_t' type-id='f6aa15be' id='945467e6'/>
<typedef-decl name='snapfilter_cb_t' type-id='d2a5e211' id='3d3ffb69'/>
<class-decl name='recvflags' size-in-bits='416' is-struct='yes' visibility='default' id='34a384dc'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='verbose' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='isprefix' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='istail' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='force' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='canmountoff' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='resumable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='byteswap' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='nomount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='skipholds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='domount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='forceunmount' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='recvflags_t' type-id='34a384dc' id='9e59d1d4'/>
<enum-decl name='lzc_send_flags' id='bfbd3c8e'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
<enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
<enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
<enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
<enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
</enum-decl>
<class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='e0a4a1cb'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='ddk_cksum' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='ddk_prop' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ddt_key_t' type-id='e0a4a1cb' id='67f6d2cf'/>
<enum-decl name='dmu_object_type' id='04b3b0b9'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DMU_OT_NONE' value='0'/>
<enumerator name='DMU_OT_OBJECT_DIRECTORY' value='1'/>
<enumerator name='DMU_OT_OBJECT_ARRAY' value='2'/>
<enumerator name='DMU_OT_PACKED_NVLIST' value='3'/>
<enumerator name='DMU_OT_PACKED_NVLIST_SIZE' value='4'/>
<enumerator name='DMU_OT_BPOBJ' value='5'/>
<enumerator name='DMU_OT_BPOBJ_HDR' value='6'/>
<enumerator name='DMU_OT_SPACE_MAP_HEADER' value='7'/>
<enumerator name='DMU_OT_SPACE_MAP' value='8'/>
<enumerator name='DMU_OT_INTENT_LOG' value='9'/>
<enumerator name='DMU_OT_DNODE' value='10'/>
<enumerator name='DMU_OT_OBJSET' value='11'/>
<enumerator name='DMU_OT_DSL_DIR' value='12'/>
<enumerator name='DMU_OT_DSL_DIR_CHILD_MAP' value='13'/>
<enumerator name='DMU_OT_DSL_DS_SNAP_MAP' value='14'/>
<enumerator name='DMU_OT_DSL_PROPS' value='15'/>
<enumerator name='DMU_OT_DSL_DATASET' value='16'/>
<enumerator name='DMU_OT_ZNODE' value='17'/>
<enumerator name='DMU_OT_OLDACL' value='18'/>
<enumerator name='DMU_OT_PLAIN_FILE_CONTENTS' value='19'/>
<enumerator name='DMU_OT_DIRECTORY_CONTENTS' value='20'/>
<enumerator name='DMU_OT_MASTER_NODE' value='21'/>
<enumerator name='DMU_OT_UNLINKED_SET' value='22'/>
<enumerator name='DMU_OT_ZVOL' value='23'/>
<enumerator name='DMU_OT_ZVOL_PROP' value='24'/>
<enumerator name='DMU_OT_PLAIN_OTHER' value='25'/>
<enumerator name='DMU_OT_UINT64_OTHER' value='26'/>
<enumerator name='DMU_OT_ZAP_OTHER' value='27'/>
<enumerator name='DMU_OT_ERROR_LOG' value='28'/>
<enumerator name='DMU_OT_SPA_HISTORY' value='29'/>
<enumerator name='DMU_OT_SPA_HISTORY_OFFSETS' value='30'/>
<enumerator name='DMU_OT_POOL_PROPS' value='31'/>
<enumerator name='DMU_OT_DSL_PERMS' value='32'/>
<enumerator name='DMU_OT_ACL' value='33'/>
<enumerator name='DMU_OT_SYSACL' value='34'/>
<enumerator name='DMU_OT_FUID' value='35'/>
<enumerator name='DMU_OT_FUID_SIZE' value='36'/>
<enumerator name='DMU_OT_NEXT_CLONES' value='37'/>
<enumerator name='DMU_OT_SCAN_QUEUE' value='38'/>
<enumerator name='DMU_OT_USERGROUP_USED' value='39'/>
<enumerator name='DMU_OT_USERGROUP_QUOTA' value='40'/>
<enumerator name='DMU_OT_USERREFS' value='41'/>
<enumerator name='DMU_OT_DDT_ZAP' value='42'/>
<enumerator name='DMU_OT_DDT_STATS' value='43'/>
<enumerator name='DMU_OT_SA' value='44'/>
<enumerator name='DMU_OT_SA_MASTER_NODE' value='45'/>
<enumerator name='DMU_OT_SA_ATTR_REGISTRATION' value='46'/>
<enumerator name='DMU_OT_SA_ATTR_LAYOUTS' value='47'/>
<enumerator name='DMU_OT_SCAN_XLATE' value='48'/>
<enumerator name='DMU_OT_DEDUP' value='49'/>
<enumerator name='DMU_OT_DEADLIST' value='50'/>
<enumerator name='DMU_OT_DEADLIST_HDR' value='51'/>
<enumerator name='DMU_OT_DSL_CLONES' value='52'/>
<enumerator name='DMU_OT_BPOBJ_SUBOBJ' value='53'/>
<enumerator name='DMU_OT_NUMTYPES' value='54'/>
<enumerator name='DMU_OTN_UINT8_DATA' value='128'/>
<enumerator name='DMU_OTN_UINT8_METADATA' value='192'/>
<enumerator name='DMU_OTN_UINT16_DATA' value='129'/>
<enumerator name='DMU_OTN_UINT16_METADATA' value='193'/>
<enumerator name='DMU_OTN_UINT32_DATA' value='130'/>
<enumerator name='DMU_OTN_UINT32_METADATA' value='194'/>
<enumerator name='DMU_OTN_UINT64_DATA' value='131'/>
<enumerator name='DMU_OTN_UINT64_METADATA' value='195'/>
<enumerator name='DMU_OTN_ZAP_DATA' value='132'/>
<enumerator name='DMU_OTN_ZAP_METADATA' value='196'/>
<enumerator name='DMU_OTN_UINT8_ENC_DATA' value='160'/>
<enumerator name='DMU_OTN_UINT8_ENC_METADATA' value='224'/>
<enumerator name='DMU_OTN_UINT16_ENC_DATA' value='161'/>
<enumerator name='DMU_OTN_UINT16_ENC_METADATA' value='225'/>
<enumerator name='DMU_OTN_UINT32_ENC_DATA' value='162'/>
<enumerator name='DMU_OTN_UINT32_ENC_METADATA' value='226'/>
<enumerator name='DMU_OTN_UINT64_ENC_DATA' value='163'/>
<enumerator name='DMU_OTN_UINT64_ENC_METADATA' value='227'/>
<enumerator name='DMU_OTN_ZAP_ENC_DATA' value='164'/>
<enumerator name='DMU_OTN_ZAP_ENC_METADATA' value='228'/>
</enum-decl>
<typedef-decl name='dmu_object_type_t' type-id='04b3b0b9' id='5c9d8906'/>
<class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='1d53e28b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='zc_word' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zio_cksum_t' type-id='1d53e28b' id='39730d0b'/>
<class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='781a52d7'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_type' type-id='08f5ca17' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='drr_payloadlen' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_u' type-id='ac5ab59b' visibility='default'/>
</data-member>
</class-decl>
<enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='DRR_BEGIN' value='0'/>
<enumerator name='DRR_OBJECT' value='1'/>
<enumerator name='DRR_FREEOBJECTS' value='2'/>
<enumerator name='DRR_WRITE' value='3'/>
<enumerator name='DRR_FREE' value='4'/>
<enumerator name='DRR_END' value='5'/>
<enumerator name='DRR_WRITE_BYREF' value='6'/>
<enumerator name='DRR_SPILL' value='7'/>
<enumerator name='DRR_WRITE_EMBEDDED' value='8'/>
<enumerator name='DRR_OBJECT_RANGE' value='9'/>
<enumerator name='DRR_REDACT' value='10'/>
<enumerator name='DRR_NUMTYPES' value='11'/>
</enum-decl>
<union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='ac5ab59b'>
<data-member access='public'>
<var-decl name='drr_begin' type-id='09fcdc01' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_end' type-id='6ee25631' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_object' type-id='f9ad530b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_freeobjects' type-id='a27d958e' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_write' type-id='4cc69e4b' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_free' type-id='c836cfd2' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_write_byref' type-id='e511cdce' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_spill' type-id='1e69a80a' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_write_embedded' type-id='98b1345e' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_object_range' type-id='aba1f9e1' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_redact' type-id='50389039' visibility='default'/>
</data-member>
<data-member access='public'>
<var-decl name='drr_checksum' type-id='a5fe3647' visibility='default'/>
</data-member>
</union-decl>
<class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='6ee25631'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='f9ad530b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='drr_bonustype' type-id='5c9d8906' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_blksz' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
<var-decl name='drr_bonuslen' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
<var-decl name='drr_compress' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='208'>
<var-decl name='drr_dn_slots' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='216'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
<var-decl name='drr_raw_bonuslen' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_indblkshift' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='328'>
<var-decl name='drr_nlevels' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='336'>
<var-decl name='drr_nblkptr' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='344'>
<var-decl name='drr_pad' type-id='0f4ddd0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_maxblkid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='a27d958e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_firstobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_numobjs' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='4cc69e4b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='drr_pad' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_logical_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='328'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='336'>
<var-decl name='drr_compressiontype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='344'>
<var-decl name='drr_pad2' type-id='0f4ddd0b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_key' type-id='67f6d2cf' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='drr_compressed_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
<var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
<var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
<var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='c836cfd2'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='e511cdce'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_refguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_refobject' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_refoffset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='456'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='464'>
<var-decl name='drr_pad2' type-id='0f562bd0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='drr_key' type-id='67f6d2cf' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='1e69a80a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='200'>
<var-decl name='drr_compressiontype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='208'>
<var-decl name='drr_pad' type-id='0f562bd0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_compressed_size' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='608'>
<var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='98b1345e'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_compression' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='264'>
<var-decl name='drr_etype' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='272'>
<var-decl name='drr_pad' type-id='0f562bd0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='drr_lsize' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='drr_psize' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='aba1f9e1'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_firstobj' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_numslots' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
<var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='488'>
<var-decl name='drr_pad' type-id='d3490169' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='50389039'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='a5fe3647'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='drr_pad' type-id='8c2bcad1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2176'>
<var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='Byte' type-id='002ac4a6' id='efb9ba06'/>
<typedef-decl name='uLong' type-id='7359adad' id='5bbcce85'/>
<typedef-decl name='Bytef' type-id='efb9ba06' id='c1606520'/>
<typedef-decl name='uLongf' type-id='5bbcce85' id='4d39af59'/>
<pointer-type-def type-id='c1606520' size-in-bits='64' id='4c667223'/>
<qualified-type-def type-id='c1606520' const='yes' id='a6124a50'/>
<pointer-type-def type-id='a6124a50' size-in-bits='64' id='e8cb3e0e'/>
<qualified-type-def type-id='781a52d7' const='yes' id='413ab2b8'/>
<pointer-type-def type-id='413ab2b8' size-in-bits='64' id='41671bd6'/>
<pointer-type-def type-id='3ff5601b' size-in-bits='64' id='4aafb922'/>
<pointer-type-def type-id='9e59d1d4' size-in-bits='64' id='4ea84b4f'/>
<pointer-type-def type-id='945467e6' size-in-bits='64' id='8def7735'/>
<pointer-type-def type-id='3d3ffb69' size-in-bits='64' id='72a26210'/>
<pointer-type-def type-id='c9d12d66' size-in-bits='64' id='b2eb2c3f'/>
<pointer-type-def type-id='4d39af59' size-in-bits='64' id='60db3356'/>
<pointer-type-def type-id='39730d0b' size-in-bits='64' id='c24fc2ee'/>
<function-decl name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='822cd80b'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
<parameter type-id='fcd57163'/>
<return type-id='4c81de99'/>
</function-decl>
<function-decl name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<parameter type-id='ae3e8ca6'/>
<parameter type-id='3502e3ff'/>
<parameter type-id='80f4b756'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='41671bd6'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='857bb57e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_space' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='bfbd3c8e'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<parameter type-id='5d6479ae'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzc_rename' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_set_pipe_max' mangled-name='libzfs_set_pipe_max' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_set_pipe_max'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-decl>
+ <function-decl name='avl_insert' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='a3681dea'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='fba6cb51'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
<function-decl name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3fa542f0'/>
<parameter type-id='4aafb922'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fnvlist_size' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='5ce45b60'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='3fa542f0'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5ce45b60'/>
<parameter type-id='80f4b756'/>
<parameter type-id='4dd26a40'/>
<return type-id='5d6479ae'/>
</function-decl>
<function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='9c313c2d'/>
<parameter type-id='c24fc2ee'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_native'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='5d6479ae' name='bytes_written'/>
<parameter type-id='5d6479ae' name='blocks_visited'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='token'/>
<return type-id='5ce45b60'/>
</function-decl>
<function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='resume_token'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='80f4b756' name='resume_token'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='fromsnap'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='95e97e5e' name='outfd'/>
<parameter type-id='72a26210' name='filter_func'/>
<parameter type-id='eaa32e2f' name='cb_arg'/>
<parameter type-id='857bb57e' name='debugnvp'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='from'/>
<parameter type-id='95e97e5e' name='fd'/>
<parameter type-id='8def7735' name='flags'/>
<parameter type-id='80f4b756' name='redactbook'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_receive' mangled-name='zfs_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_receive'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='tosnap'/>
<parameter type-id='5ce45b60' name='props'/>
<parameter type-id='4ea84b4f' name='flags'/>
<parameter type-id='95e97e5e' name='infd'/>
<parameter type-id='a3681dea' name='stream_avl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='sprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='perror' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strcat' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='80f4b756'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='strndup' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='26a90f95'/>
</function-decl>
<function-decl name='time' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='b2eb2c3f'/>
<return type-id='c9d12d66'/>
</function-decl>
<function-decl name='localtime' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9f201474'/>
<return type-id='d915a820'/>
</function-decl>
<function-decl name='write' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='79a0948f'/>
</function-decl>
<function-decl name='sleep' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='f0981eeb'/>
<return type-id='f0981eeb'/>
</function-decl>
<function-decl name='uncompress' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4c667223'/>
<parameter type-id='60db3356'/>
<parameter type-id='e8cb3e0e'/>
<parameter type-id='5bbcce85'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-type size-in-bits='64' id='d2a5e211'>
<parameter type-id='9200a744'/>
<parameter type-id='eaa32e2f'/>
<return type-id='c19b74c3'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='libzfs_status.c' language='LANG_C99'>
<function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
<parameter type-id='5ce45b60' name='config'/>
<parameter type-id='9b23c9ad' name='msgid'/>
<parameter type-id='cec6f2e4' name='errata'/>
<return type-id='d3dd6294'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='libzfs_util.c' language='LANG_C99'>
<class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='d5027220'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='gp_offset' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='fp_offset' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='overflow_arg_area' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='reg_save_area' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
<type-decl name='double' size-in-bits='64' id='a0eb0f08'/>
<array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='192' id='e41bdf22'>
<subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
<array-type-def dimensions='1' type-id='19cefcee' size-in-bits='160' alignment-in-bits='32' id='3fcf57d2'>
<subrange length='5' type-id='7359adad' id='53010e10'/>
</array-type-def>
<enum-decl name='zfs_get_column_t' naming-typedef-id='19cefcee' id='223bdcaa'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='GET_COL_NONE' value='0'/>
<enumerator name='GET_COL_NAME' value='1'/>
<enumerator name='GET_COL_PROPERTY' value='2'/>
<enumerator name='GET_COL_VALUE' value='3'/>
<enumerator name='GET_COL_RECVD' value='4'/>
<enumerator name='GET_COL_SOURCE' value='5'/>
</enum-decl>
<typedef-decl name='zfs_get_column_t' type-id='223bdcaa' id='19cefcee'/>
<class-decl name='zprop_get_cbdata' size-in-bits='640' is-struct='yes' visibility='default' id='f3d3c319'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='cb_sources' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='cb_columns' type-id='3fcf57d2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='cb_colwidths' type-id='e41bdf22' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='cb_scripted' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='416'>
<var-decl name='cb_literal' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='cb_first' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='cb_proplist' type-id='3a9b2288' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='cb_type' type-id='2e45de5d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_get_cbdata_t' type-id='f3d3c319' id='f3d87113'/>
<typedef-decl name='zprop_func' type-id='2e711a2a' id='1ec3747a'/>
<enum-decl name='zprop_attr_t' naming-typedef-id='999701cc' id='77d05200'>
<underlying-type type-id='9cac1fee'/>
<enumerator name='PROP_DEFAULT' value='0'/>
<enumerator name='PROP_READONLY' value='1'/>
<enumerator name='PROP_INHERIT' value='2'/>
<enumerator name='PROP_ONETIME' value='3'/>
<enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
</enum-decl>
<typedef-decl name='zprop_attr_t' type-id='77d05200' id='999701cc'/>
<class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='87957af9'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pi_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pi_value' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_index_t' type-id='87957af9' id='64636ce3'/>
<class-decl name='zprop_desc_t' size-in-bits='704' is-struct='yes' naming-typedef-id='ffa52b96' visibility='default' id='bbff5e4b'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='pd_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='pd_propnum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='pd_proptype' type-id='31429eff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='pd_strdefault' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='pd_numdefault' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='pd_attr' type-id='999701cc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='pd_types' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='pd_values' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
<var-decl name='pd_colname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='pd_rightalign' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='pd_visible' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='pd_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='pd_table' type-id='c8bc397b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
<var-decl name='pd_table_size' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='zprop_desc_t' type-id='bbff5e4b' id='ffa52b96'/>
<class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='0c544dc0'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='mnt_major' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
<pointer-type-def type-id='d5027220' size-in-bits='64' id='b7f2d5e6'/>
<qualified-type-def type-id='26a90f95' const='yes' id='57de658a'/>
<pointer-type-def type-id='57de658a' size-in-bits='64' id='f319fae0'/>
<pointer-type-def type-id='9b23c9ad' size-in-bits='64' id='c0563f85'/>
<qualified-type-def type-id='33f57a65' const='yes' id='21fd6035'/>
<pointer-type-def type-id='21fd6035' size-in-bits='64' id='a0de50cd'/>
<pointer-type-def type-id='a0de50cd' size-in-bits='64' id='24f95ba5'/>
<qualified-type-def type-id='64636ce3' const='yes' id='072f7953'/>
<pointer-type-def type-id='072f7953' size-in-bits='64' id='c8bc397b'/>
<pointer-type-def type-id='0c544dc0' size-in-bits='64' id='394fc496'/>
<pointer-type-def type-id='c70fa2e8' size-in-bits='64' id='2e711a2a'/>
<pointer-type-def type-id='aca3bac8' size-in-bits='64' id='d33f11cb'/>
<qualified-type-def type-id='d33f11cb' restrict='yes' id='5c53ba29'/>
<pointer-type-def type-id='ffa52b96' size-in-bits='64' id='76c8174b'/>
<pointer-type-def type-id='f3d87113' size-in-bits='64' id='0d2a0670'/>
<function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_kernel'>
<parameter type-id='26a90f95'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_load_module' mangled-name='libzfs_load_module' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_load_module'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_unsupported'>
<parameter type-id='80f4b756'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='zpool_feature_init' mangled-name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_feature_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_init' mangled-name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_init' mangled-name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='zpool_prop_init' mangled-name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_init'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
<return type-id='76c8174b'/>
</function-decl>
<function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
<parameter type-id='1ec3747a'/>
<parameter type-id='eaa32e2f'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='c19b74c3'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
<parameter type-id='80f4b756'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
<parameter type-id='95e97e5e'/>
<parameter type-id='80f4b756'/>
<parameter type-id='5d6479ae'/>
<parameter type-id='2e45de5d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
<parameter type-id='95e97e5e'/>
<parameter type-id='37e3bd22'/>
<parameter type-id='2e45de5d'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
<parameter type-id='95e97e5e'/>
<parameter type-id='2e45de5d'/>
<parameter type-id='c19b74c3'/>
<return type-id='c19b74c3'/>
</function-decl>
<function-decl name='getextmntent' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='394fc496'/>
<parameter type-id='62f7a03d'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_error_action' mangled-name='libzfs_error_action' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_action'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_description'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='80f4b756'/>
</function-decl>
<function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='c19b74c3' name='printerr'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='95e97e5e' name='flags'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='env'/>
<parameter type-id='c0563f85' name='lines'/>
<parameter type-id='7292109c' name='lines_cnt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='9b23c9ad' name='argv'/>
<parameter type-id='9b23c9ad' name='env'/>
<parameter type-id='c0563f85' name='lines'/>
<parameter type-id='7292109c' name='lines_cnt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
<parameter type-id='9b23c9ad' name='strs'/>
<parameter type-id='95e97e5e' name='count'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
<parameter type-id='26a90f95' name='envvar'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
<return type-id='b0382bb3'/>
</function-decl>
<function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
<parameter type-id='b0382bb3' name='hdl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='80f4b756' name='path'/>
<parameter type-id='2e45de5d' name='argtype'/>
<return type-id='9200a744'/>
</function-decl>
<function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
<parameter type-id='80f4b756' name='name'/>
<parameter type-id='0d2a0670' name='cbp'/>
<parameter type-id='80f4b756' name='propname'/>
<parameter type-id='80f4b756' name='value'/>
<parameter type-id='a2256d42' name='sourcetype'/>
<parameter type-id='80f4b756' name='source'/>
<parameter type-id='80f4b756' name='recvd_value'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='26a90f95' name='props'/>
<parameter type-id='e4378506' name='listp'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
<parameter type-id='3a9b2288' name='pl'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
<parameter type-id='1ec3747a' name='func'/>
<parameter type-id='eaa32e2f' name='cb'/>
<parameter type-id='c19b74c3' name='show_all'/>
<parameter type-id='c19b74c3' name='ordered'/>
<parameter type-id='2e45de5d' name='type'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
<parameter type-id='26a90f95' name='version'/>
<parameter type-id='95e97e5e' name='len'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
<parameter type-id='26a90f95' name='color'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
<parameter type-id='26a90f95' name='color'/>
<parameter type-id='26a90f95' name='format'/>
<parameter is-variadic='yes'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='pow' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a0eb0f08'/>
<parameter type-id='a0eb0f08'/>
<return type-id='a0eb0f08'/>
</function-decl>
<function-decl name='__ctype_toupper_loc' visibility='default' binding='global' size-in-bits='64'>
<return type-id='24f95ba5'/>
</function-decl>
+ <function-decl name='dlclose' visibility='default' binding='global' size-in-bits='64'>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
<function-decl name='regcomp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='5c53ba29'/>
<parameter type-id='9d26089a'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='regfree' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='d33f11cb'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='vfprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='e75a27e9'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b7f2d5e6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vsnprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<parameter type-id='80f4b756'/>
<parameter type-id='b7f2d5e6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='vasprintf' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='8c85230f'/>
<parameter type-id='9d26089a'/>
<parameter type-id='b7f2d5e6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='strtod' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='9d26089a'/>
<parameter type-id='8c85230f'/>
<return type-id='a0eb0f08'/>
</function-decl>
<function-decl name='realloc' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='eaa32e2f'/>
<parameter type-id='b59d7dce'/>
<return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='exit' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='strnlen' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='b59d7dce'/>
<return type-id='b59d7dce'/>
</function-decl>
<function-decl name='waitpid' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='3629bad8'/>
<parameter type-id='7292109c'/>
<parameter type-id='95e97e5e'/>
<return type-id='3629bad8'/>
</function-decl>
<function-decl name='dup2' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execve' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execv' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execvp' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='execvpe' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='f319fae0'/>
<parameter type-id='f319fae0'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='_exit' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='vfork' visibility='default' binding='global' size-in-bits='64'>
<return type-id='3629bad8'/>
</function-decl>
<function-type size-in-bits='64' id='c70fa2e8'>
<parameter type-id='95e97e5e'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-type>
</abi-instr>
<abi-instr address-size='64' path='os/linux/libzfs_mount_os.c' language='LANG_C99'>
<pointer-type-def type-id='7359adad' size-in-bits='64' id='1d2c2b85'/>
<function-decl name='mount' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='80f4b756'/>
<parameter type-id='7359adad'/>
<parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='umount2' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='geteuid' visibility='default' binding='global' size-in-bits='64'>
<return type-id='cc5fcceb'/>
</function-decl>
<function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
<parameter type-id='26a90f95' name='mntopts'/>
<parameter type-id='1d2c2b85' name='mntflags'/>
<parameter type-id='1d2c2b85' name='zfsflags'/>
<parameter type-id='95e97e5e' name='sloppy'/>
<parameter type-id='26a90f95' name='badopt'/>
<parameter type-id='26a90f95' name='mtabopt'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='80f4b756' name='mntpoint'/>
<parameter type-id='26a90f95' name='mntopts'/>
<parameter type-id='26a90f95' name='mtabopt'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='os/linux/libzfs_pool_os.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='a84c031d' size-in-bits='288' id='16e6f2c6'>
<subrange length='36' type-id='7359adad' id='ae666bde'/>
</array-type-def>
<array-type-def dimensions='1' type-id='a65ae39c' size-in-bits='960' id='fa198beb'>
<subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
<array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='384' id='dba89ba3'>
<subrange length='12' type-id='7359adad' id='84827bdc'/>
</array-type-def>
<array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='256' id='01d84ed4'>
<subrange length='8' type-id='7359adad' id='56e0c0b1'/>
</array-type-def>
<class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='a65ae39c'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='p_start' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='p_size' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='p_guid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='p_tag' type-id='d908a348' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='272'>
<var-decl name='p_flag' type-id='d908a348' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
<var-decl name='p_name' type-id='16e6f2c6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='p_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
<var-decl name='p_resv' type-id='01d84ed4' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='dd4a2e5a'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='efi_version' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='efi_nparts' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='efi_part_size' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
<var-decl name='efi_lbasize' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
<var-decl name='efi_last_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
<var-decl name='efi_first_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
<var-decl name='efi_last_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
<var-decl name='efi_disk_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
<var-decl name='efi_flags' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
<var-decl name='efi_reserved1' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
<var-decl name='efi_altern_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
<var-decl name='efi_reserved' type-id='dba89ba3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
<var-decl name='efi_parts' type-id='fa198beb' visibility='default'/>
</data-member>
</class-decl>
<class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='214f32ea'>
<data-member access='public' layout-offset-in-bits='0'>
<var-decl name='time_low' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
<var-decl name='time_mid' type-id='149c6638' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
<var-decl name='time_hi_and_version' type-id='149c6638' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
<var-decl name='clock_seq_hi_and_reserved' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='72'>
<var-decl name='clock_seq_low' type-id='b96825af' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='80'>
<var-decl name='node_addr' type-id='0f562bd0' visibility='default'/>
</data-member>
</class-decl>
<typedef-decl name='ushort_t' type-id='8efea9e5' id='d908a348'/>
<typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
<typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
<pointer-type-def type-id='dd4a2e5a' size-in-bits='64' id='0d8119a8'/>
<pointer-type-def type-id='0d8119a8' size-in-bits='64' id='c43b27a6'/>
<function-decl name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='26a90f95'/>
<parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='8f92235e'/>
<parameter type-id='c43b27a6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='c43b27a6'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_write' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<parameter type-id='0d8119a8'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_rescan' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='efi_free' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='0d8119a8'/>
<return type-id='48b5725f'/>
</function-decl>
<function-decl name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='rand' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='fsync' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
<parameter type-id='b0382bb3' name='hdl'/>
<parameter type-id='4c81de99' name='zhp'/>
<parameter type-id='80f4b756' name='name'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
<abi-instr address-size='64' path='os/linux/libzfs_util_os.c' language='LANG_C99'>
<typedef-decl name='__useconds_t' type-id='f0981eeb' id='4e80d4b1'/>
<typedef-decl name='__clockid_t' type-id='95e97e5e' id='08f9a87a'/>
<typedef-decl name='clockid_t' type-id='08f9a87a' id='a1c3b834'/>
<pointer-type-def type-id='a9c79a1f' size-in-bits='64' id='3d83ba87'/>
<function-decl name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='a1c3b834'/>
<parameter type-id='3d83ba87'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='access' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/>
<parameter type-id='95e97e5e'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='usleep' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='4e80d4b1'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
<parameter type-id='95e97e5e' name='error'/>
<return type-id='80f4b756'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c b/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c
index e31d4ce44bfb..644dd26859f1 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_crypto.c
@@ -1,1619 +1,1805 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
* Copyright 2020 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/dsl_crypt.h>
#include <libintl.h>
#include <termios.h>
#include <signal.h>
#include <errno.h>
#include <openssl/evp.h>
+#if LIBFETCH_DYNAMIC
+#include <dlfcn.h>
+#endif
+#if LIBFETCH_IS_FETCH
+#include <sys/param.h>
+#include <stdio.h>
+#include <fetch.h>
+#elif LIBFETCH_IS_LIBCURL
+#include <curl/curl.h>
+#endif
#include <libzfs.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"
/*
* User keys are used to decrypt the master encryption keys of a dataset. This
* indirection allows a user to change his / her access key without having to
* re-encrypt the entire dataset. User keys can be provided in one of several
* ways. Raw keys are simply given to the kernel as is. Similarly, hex keys
* are converted to binary and passed into the kernel. Password based keys are
* a bit more complicated. Passwords alone do not provide suitable entropy for
* encryption and may be too short or too long to be used. In order to derive
* a more appropriate key we use a PBKDF2 function. This function is designed
* to take a (relatively) long time to calculate in order to discourage
* attackers from guessing from a list of common passwords. PBKDF2 requires
* 2 additional parameters. The first is the number of iterations to run, which
* will ultimately determine how long it takes to derive the resulting key from
* the password. The second parameter is a salt that is randomly generated for
* each dataset. The salt is used to "tweak" PBKDF2 such that a group of
* attackers cannot reasonably generate a table of commonly known passwords to
* their output keys and expect it to work for all past and future PBKDF2 users.
* We store the salt as a hidden property of the dataset (although it is
* technically ok if the salt is known to the attacker).
*/
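For concreteness, the passphrase path described above reduces to PBKDF2-HMAC-SHA1 over the passphrase with the dataset's 8-byte little-endian salt, which derive_key() further down performs through OpenSSL. A minimal standalone sketch, assuming the usual 32-byte wrapping key; the function name is a placeholder and is not part of libzfs:

#include <openssl/evp.h>
#include <stdint.h>
#include <string.h>

/* Sketch: derive a 32-byte wrapping key from a passphrase and salt. */
static int
derive_wrapping_key_sketch(const char *pass, uint64_t salt_le,
    uint64_t iters, uint8_t out[32])
{
	/* PKCS5_PBKDF2_HMAC_SHA1() returns 1 on success, 0 on failure. */
	if (PKCS5_PBKDF2_HMAC_SHA1(pass, (int)strlen(pass),
	    (const unsigned char *)&salt_le, sizeof (salt_le), (int)iters,
	    32, out) != 1)
		return (-1);
	return (0);
}

derive_key() below does the same with WRAPPING_KEY_LEN and a libzfs-allocated buffer, while the raw and hex formats skip the KDF entirely.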
#define MIN_PASSPHRASE_LEN 8
#define MAX_PASSPHRASE_LEN 512
#define MAX_KEY_PROMPT_ATTEMPTS 3
static int caught_interrupt;
static int get_key_material_file(libzfs_handle_t *, const char *, const char *,
zfs_keyformat_t, boolean_t, uint8_t **, size_t *);
+static int get_key_material_https(libzfs_handle_t *, const char *, const char *,
+ zfs_keyformat_t, boolean_t, uint8_t **, size_t *);
static zfs_uri_handler_t uri_handlers[] = {
{ "file", get_key_material_file },
+ { "https", get_key_material_https },
+ { "http", get_key_material_https },
{ NULL, NULL }
};
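This table is the whole registration surface for a key-material back-end: a URI scheme string paired with a callback sharing the get_key_material_file() signature, terminated by a NULL entry. get_key_material() below walks the table and dispatches on the scheme parsed out of the keylocation property. Purely as an illustration (the scheme and function name are invented, not part of libzfs), an additional back-end would plug in the same way:

/* Hypothetical back-end with the shared handler signature. */
static int get_key_material_example(libzfs_handle_t *, const char *,
    const char *, zfs_keyformat_t, boolean_t, uint8_t **, size_t *);

static zfs_uri_handler_t example_uri_handlers[] = {
	{ "example", get_key_material_example },
	{ NULL, NULL }
};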
static int
pkcs11_get_urandom(uint8_t *buf, size_t bytes)
{
int rand;
ssize_t bytes_read = 0;
rand = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
if (rand < 0)
return (rand);
while (bytes_read < bytes) {
ssize_t rc = read(rand, buf + bytes_read, bytes - bytes_read);
if (rc < 0)
break;
bytes_read += rc;
}
(void) close(rand);
return (bytes_read);
}
static int
zfs_prop_parse_keylocation(libzfs_handle_t *restrict hdl, const char *str,
zfs_keylocation_t *restrict locp, char **restrict schemep)
{
*locp = ZFS_KEYLOCATION_NONE;
*schemep = NULL;
if (strcmp("prompt", str) == 0) {
*locp = ZFS_KEYLOCATION_PROMPT;
return (0);
}
regmatch_t pmatch[2];
if (regexec(&hdl->libzfs_urire, str, ARRAY_SIZE(pmatch),
pmatch, 0) == 0) {
size_t scheme_len;
if (pmatch[1].rm_so == -1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid URI"));
return (EINVAL);
}
scheme_len = pmatch[1].rm_eo - pmatch[1].rm_so;
*schemep = calloc(1, scheme_len + 1);
if (*schemep == NULL) {
int ret = errno;
errno = 0;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid URI"));
return (ret);
}
(void) memcpy(*schemep, str + pmatch[1].rm_so, scheme_len);
*locp = ZFS_KEYLOCATION_URI;
return (0);
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Invalid keylocation"));
return (EINVAL);
}
static int
hex_key_to_raw(char *hex, int hexlen, uint8_t *out)
{
int ret, i;
unsigned int c;
for (i = 0; i < hexlen; i += 2) {
if (!isxdigit(hex[i]) || !isxdigit(hex[i + 1])) {
ret = EINVAL;
goto error;
}
ret = sscanf(&hex[i], "%02x", &c);
if (ret != 1) {
ret = EINVAL;
goto error;
}
out[i / 2] = c;
}
return (0);
error:
return (ret);
}
static void
catch_signal(int sig)
{
caught_interrupt = sig;
}
static const char *
get_format_prompt_string(zfs_keyformat_t format)
{
switch (format) {
case ZFS_KEYFORMAT_RAW:
return ("raw key");
case ZFS_KEYFORMAT_HEX:
return ("hex key");
case ZFS_KEYFORMAT_PASSPHRASE:
return ("passphrase");
default:
/* shouldn't happen */
return (NULL);
}
}
/* do basic validation of the key material */
static int
validate_key(libzfs_handle_t *hdl, zfs_keyformat_t keyformat,
const char *key, size_t keylen)
{
switch (keyformat) {
case ZFS_KEYFORMAT_RAW:
/* verify the key length is correct */
if (keylen < WRAPPING_KEY_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Raw key too short (expected %u)."),
WRAPPING_KEY_LEN);
return (EINVAL);
}
if (keylen > WRAPPING_KEY_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Raw key too long (expected %u)."),
WRAPPING_KEY_LEN);
return (EINVAL);
}
break;
case ZFS_KEYFORMAT_HEX:
/* verify the key length is correct */
if (keylen < WRAPPING_KEY_LEN * 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Hex key too short (expected %u)."),
WRAPPING_KEY_LEN * 2);
return (EINVAL);
}
if (keylen > WRAPPING_KEY_LEN * 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Hex key too long (expected %u)."),
WRAPPING_KEY_LEN * 2);
return (EINVAL);
}
/* check for invalid hex digits */
for (size_t i = 0; i < WRAPPING_KEY_LEN * 2; i++) {
if (!isxdigit(key[i])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid hex character detected."));
return (EINVAL);
}
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
/* verify the length is within bounds */
if (keylen > MAX_PASSPHRASE_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Passphrase too long (max %u)."),
MAX_PASSPHRASE_LEN);
return (EINVAL);
}
if (keylen < MIN_PASSPHRASE_LEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Passphrase too short (min %u)."),
MIN_PASSPHRASE_LEN);
return (EINVAL);
}
break;
default:
/* can't happen, checked above */
break;
}
return (0);
}
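In concrete terms, the checks above accept raw keys of exactly WRAPPING_KEY_LEN bytes (32 in current OpenZFS, i.e. a 256-bit wrapping key), hex keys of exactly twice that many hex digits (64), and passphrases of 8 to 512 characters per the limits defined above. A suitable hex keyfile is, for example, the single 64-character line produced by openssl rand -hex 32.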
static int
libzfs_getpassphrase(zfs_keyformat_t keyformat, boolean_t is_reenter,
boolean_t new_key, const char *fsname,
char **restrict res, size_t *restrict reslen)
{
FILE *f = stdin;
size_t buflen = 0;
ssize_t bytes;
int ret = 0;
struct termios old_term, new_term;
struct sigaction act, osigint, osigtstp;
*res = NULL;
*reslen = 0;
/*
* handle SIGINT and ignore SIGTSTP. This is necessary to
* restore the state of the terminal.
*/
caught_interrupt = 0;
act.sa_flags = 0;
(void) sigemptyset(&act.sa_mask);
act.sa_handler = catch_signal;
(void) sigaction(SIGINT, &act, &osigint);
act.sa_handler = SIG_IGN;
(void) sigaction(SIGTSTP, &act, &osigtstp);
(void) printf("%s %s%s",
is_reenter ? "Re-enter" : "Enter",
new_key ? "new " : "",
get_format_prompt_string(keyformat));
if (fsname != NULL)
(void) printf(" for '%s'", fsname);
(void) fputc(':', stdout);
(void) fflush(stdout);
/* disable the terminal echo for key input */
(void) tcgetattr(fileno(f), &old_term);
new_term = old_term;
new_term.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHONL);
ret = tcsetattr(fileno(f), TCSAFLUSH, &new_term);
if (ret != 0) {
ret = errno;
errno = 0;
goto out;
}
bytes = getline(res, &buflen, f);
if (bytes < 0) {
ret = errno;
errno = 0;
goto out;
}
/* trim the ending newline if it exists */
if (bytes > 0 && (*res)[bytes - 1] == '\n') {
(*res)[bytes - 1] = '\0';
bytes--;
}
*reslen = bytes;
out:
/* reset the terminal */
(void) tcsetattr(fileno(f), TCSAFLUSH, &old_term);
(void) sigaction(SIGINT, &osigint, NULL);
(void) sigaction(SIGTSTP, &osigtstp, NULL);
/* if we caught a signal, re-throw it now */
if (caught_interrupt != 0)
(void) kill(getpid(), caught_interrupt);
/* print the newline that was not echo'd */
(void) printf("\n");
return (ret);
}
static int
get_key_interactive(libzfs_handle_t *restrict hdl, const char *fsname,
zfs_keyformat_t keyformat, boolean_t confirm_key, boolean_t newkey,
uint8_t **restrict outbuf, size_t *restrict len_out)
{
char *buf = NULL, *buf2 = NULL;
size_t buflen = 0, buf2len = 0;
int ret = 0;
ASSERT(isatty(fileno(stdin)));
/* raw keys cannot be entered on the terminal */
if (keyformat == ZFS_KEYFORMAT_RAW) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Cannot enter raw keys on the terminal"));
goto out;
}
/* prompt for the key */
if ((ret = libzfs_getpassphrase(keyformat, B_FALSE, newkey, fsname,
&buf, &buflen)) != 0) {
free(buf);
buf = NULL;
buflen = 0;
goto out;
}
if (!confirm_key)
goto out;
if ((ret = validate_key(hdl, keyformat, buf, buflen)) != 0) {
free(buf);
return (ret);
}
ret = libzfs_getpassphrase(keyformat, B_TRUE, newkey, fsname, &buf2,
&buf2len);
if (ret != 0) {
free(buf);
free(buf2);
buf = buf2 = NULL;
buflen = buf2len = 0;
goto out;
}
if (buflen != buf2len || strcmp(buf, buf2) != 0) {
free(buf);
buf = NULL;
buflen = 0;
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Provided keys do not match."));
}
free(buf2);
out:
*outbuf = (uint8_t *)buf;
*len_out = buflen;
return (ret);
}
static int
get_key_material_raw(FILE *fd, zfs_keyformat_t keyformat,
uint8_t **buf, size_t *len_out)
{
int ret = 0;
size_t buflen = 0;
*len_out = 0;
/* read the key material */
if (keyformat != ZFS_KEYFORMAT_RAW) {
ssize_t bytes;
bytes = getline((char **)buf, &buflen, fd);
if (bytes < 0) {
ret = errno;
errno = 0;
goto out;
}
/* trim the ending newline if it exists */
if (bytes > 0 && (*buf)[bytes - 1] == '\n') {
(*buf)[bytes - 1] = '\0';
bytes--;
}
*len_out = bytes;
} else {
size_t n;
/*
* Raw keys may have newline characters in them and so can't
* use getline(). Here we attempt to read 33 bytes so that we
* can properly check the key length (the file should only have
* 32 bytes).
*/
*buf = malloc((WRAPPING_KEY_LEN + 1) * sizeof (uint8_t));
if (*buf == NULL) {
ret = ENOMEM;
goto out;
}
n = fread(*buf, 1, WRAPPING_KEY_LEN + 1, fd);
if (n == 0 || ferror(fd)) {
/* size errors are handled by the calling function */
free(*buf);
*buf = NULL;
ret = errno;
errno = 0;
goto out;
}
*len_out = n;
}
out:
return (ret);
}
static int
get_key_material_file(libzfs_handle_t *hdl, const char *uri,
const char *fsname, zfs_keyformat_t keyformat, boolean_t newkey,
uint8_t **restrict buf, size_t *restrict len_out)
{
FILE *f = NULL;
int ret = 0;
if (strlen(uri) < 7)
return (EINVAL);
if ((f = fopen(uri + 7, "re")) == NULL) {
ret = errno;
errno = 0;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to open key material file: %s"), strerror(ret));
return (ret);
}
ret = get_key_material_raw(f, keyformat, buf, len_out);
(void) fclose(f);
return (ret);
}
+static int
+get_key_material_https(libzfs_handle_t *hdl, const char *uri,
+ const char *fsname, zfs_keyformat_t keyformat, boolean_t newkey,
+ uint8_t **restrict buf, size_t *restrict len_out)
+{
+ int ret = 0;
+ FILE *key = NULL;
+ boolean_t is_http = strncmp(uri, "http:", strlen("http:")) == 0;
+
+ if (strlen(uri) < (is_http ? 7 : 8)) {
+ ret = EINVAL;
+ goto end;
+ }
+
+#if LIBFETCH_DYNAMIC
+#define LOAD_FUNCTION(func) \
+ __typeof__(func) *func = dlsym(hdl->libfetch, #func);
+
+ if (hdl->libfetch == NULL)
+ hdl->libfetch = dlopen(LIBFETCH_SONAME, RTLD_LAZY);
+
+ if (hdl->libfetch == NULL) {
+ hdl->libfetch = (void *)-1;
+ char *err = dlerror();
+ if (err)
+ hdl->libfetch_load_error = strdup(err);
+ }
+
+ if (hdl->libfetch == (void *)-1) {
+ ret = ENOSYS;
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Couldn't load %s: %s"),
+ LIBFETCH_SONAME, hdl->libfetch_load_error ?: "(?)");
+ goto end;
+ }
+
+ boolean_t ok;
+#if LIBFETCH_IS_FETCH
+ LOAD_FUNCTION(fetchGetURL);
+ char *fetchLastErrString = dlsym(hdl->libfetch, "fetchLastErrString");
+
+ ok = fetchGetURL && fetchLastErrString;
+#elif LIBFETCH_IS_LIBCURL
+ LOAD_FUNCTION(curl_easy_init);
+ LOAD_FUNCTION(curl_easy_setopt);
+ LOAD_FUNCTION(curl_easy_perform);
+ LOAD_FUNCTION(curl_easy_cleanup);
+ LOAD_FUNCTION(curl_easy_strerror);
+ LOAD_FUNCTION(curl_easy_getinfo);
+
+ ok = curl_easy_init && curl_easy_setopt && curl_easy_perform &&
+ curl_easy_cleanup && curl_easy_strerror && curl_easy_getinfo;
+#endif
+ if (!ok) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "keylocation=%s back-end %s missing symbols."),
+ is_http ? "http://" : "https://", LIBFETCH_SONAME);
+ ret = ENOSYS;
+ goto end;
+ }
+#endif
+
+#if LIBFETCH_IS_FETCH
+ key = fetchGetURL(uri, "");
+ if (key == NULL) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Couldn't GET %s: %s"),
+ uri, fetchLastErrString);
+ ret = ENETDOWN;
+ }
+#elif LIBFETCH_IS_LIBCURL
+ CURL *curl = curl_easy_init();
+ if (curl == NULL) {
+ ret = ENOTSUP;
+ goto end;
+ }
+
+ int kfd = -1;
+#ifdef O_TMPFILE
+ kfd = open(getenv("TMPDIR") ?: "/tmp",
+ O_RDWR | O_TMPFILE | O_EXCL | O_CLOEXEC, 0600);
+ if (kfd != -1)
+ goto kfdok;
+#endif
+
+ char *path;
+ if (asprintf(&path,
+ "%s/libzfs-XXXXXXXX.https", getenv("TMPDIR") ?: "/tmp") == -1) {
+ ret = ENOMEM;
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s"),
+ strerror(ret));
+ goto end;
+ }
+
+ kfd = mkostemps(path, strlen(".https"), O_CLOEXEC);
+ if (kfd == -1) {
+ ret = errno;
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Couldn't create temporary file %s: %s"),
+ path, strerror(ret));
+ free(path);
+ goto end;
+ }
+ (void) unlink(path);
+ free(path);
+
+kfdok:
+ if ((key = fdopen(kfd, "r+")) == NULL) {
+ ret = errno;
+ (void) close(kfd);
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Couldn't reopen temporary file: %s"), strerror(ret));
+ goto end;
+ }
+
+ char errbuf[CURL_ERROR_SIZE] = "";
+ char *cainfo = getenv("SSL_CA_CERT_FILE"); /* matches fetch(3) */
+ char *capath = getenv("SSL_CA_CERT_PATH"); /* matches fetch(3) */
+ char *clcert = getenv("SSL_CLIENT_CERT_FILE"); /* matches fetch(3) */
+ char *clkey = getenv("SSL_CLIENT_KEY_FILE"); /* matches fetch(3) */
+ (void) curl_easy_setopt(curl, CURLOPT_URL, uri);
+ (void) curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+ (void) curl_easy_setopt(curl, CURLOPT_TIMEOUT_MS, 30000L);
+ (void) curl_easy_setopt(curl, CURLOPT_WRITEDATA, key);
+ (void) curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, errbuf);
+ if (cainfo != NULL)
+ (void) curl_easy_setopt(curl, CURLOPT_CAINFO, cainfo);
+ if (capath != NULL)
+ (void) curl_easy_setopt(curl, CURLOPT_CAPATH, capath);
+ if (clcert != NULL)
+ (void) curl_easy_setopt(curl, CURLOPT_SSLCERT, clcert);
+ if (clkey != NULL)
+ (void) curl_easy_setopt(curl, CURLOPT_SSLKEY, clkey);
+
+ CURLcode res = curl_easy_perform(curl);
+
+ if (res != CURLE_OK) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Failed to connect to %s: %s"),
+ uri, strlen(errbuf) ? errbuf : curl_easy_strerror(res));
+ ret = ENETDOWN;
+ } else {
+ long resp = 200;
+ (void) curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &resp);
+
+ if (resp < 200 || resp >= 300) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Couldn't GET %s: %ld"),
+ uri, resp);
+ ret = ENOENT;
+ } else
+ rewind(key);
+ }
+
+ curl_easy_cleanup(curl);
+#else
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "No keylocation=%s back-end."), is_http ? "http://" : "https://");
+ ret = ENOSYS;
+#endif
+
+end:
+ if (ret == 0)
+ ret = get_key_material_raw(key, keyformat, buf, len_out);
+
+ if (key != NULL)
+ fclose(key);
+
+ return (ret);
+}
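When LIBFETCH_DYNAMIC is in effect, the function above avoids a hard link-time dependency on its HTTP back-end: the library named by LIBFETCH_SONAME is dlopen()ed at most once per libzfs handle (a failure and its dlerror() string are cached on the handle), and every needed symbol is resolved with dlsym() through the __typeof__-based LOAD_FUNCTION macro. A minimal sketch of that lazy-binding pattern, with placeholder names and without the handle-specific caching:

#include <dlfcn.h>
#include <stddef.h>

/* Resolve sym from soname, loading the library at most once. */
static void *
lazy_sym(void **cachep, const char *soname, const char *sym)
{
	if (*cachep == NULL)
		*cachep = dlopen(soname, RTLD_LAZY);
	if (*cachep == NULL)
		return (NULL);	/* caller can report dlerror() */
	return (dlsym(*cachep, sym));
}

Once built with either back-end, the new schemes behave like any other keylocation, e.g. (pool, dataset and URL are hypothetical): zfs create -o encryption=on -o keyformat=raw -o keylocation=https://keys.example.com/tank.key tank/secure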
+
/*
* Attempts to fetch key material, no matter where it might live. The key
* material is allocated and returned in km_out. *can_retry_out will be set
* to B_TRUE if the user is providing the key material interactively, allowing
* for re-entry attempts.
*/
static int
get_key_material(libzfs_handle_t *hdl, boolean_t do_verify, boolean_t newkey,
zfs_keyformat_t keyformat, char *keylocation, const char *fsname,
uint8_t **km_out, size_t *kmlen_out, boolean_t *can_retry_out)
{
int ret;
zfs_keylocation_t keyloc = ZFS_KEYLOCATION_NONE;
uint8_t *km = NULL;
size_t kmlen = 0;
char *uri_scheme = NULL;
zfs_uri_handler_t *handler = NULL;
boolean_t can_retry = B_FALSE;
/* verify and parse the keylocation */
ret = zfs_prop_parse_keylocation(hdl, keylocation, &keyloc,
&uri_scheme);
if (ret != 0)
goto error;
/* open the appropriate file descriptor */
switch (keyloc) {
case ZFS_KEYLOCATION_PROMPT:
if (isatty(fileno(stdin))) {
can_retry = keyformat != ZFS_KEYFORMAT_RAW;
ret = get_key_interactive(hdl, fsname, keyformat,
do_verify, newkey, &km, &kmlen);
} else {
/* fetch the key material into the buffer */
ret = get_key_material_raw(stdin, keyformat, &km,
&kmlen);
}
if (ret != 0)
goto error;
break;
case ZFS_KEYLOCATION_URI:
ret = ENOTSUP;
for (handler = uri_handlers; handler->zuh_scheme != NULL;
handler++) {
if (strcmp(handler->zuh_scheme, uri_scheme) != 0)
continue;
if ((ret = handler->zuh_handler(hdl, keylocation,
fsname, keyformat, newkey, &km, &kmlen)) != 0)
goto error;
break;
}
if (ret == ENOTSUP) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"URI scheme is not supported"));
goto error;
}
break;
default:
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid keylocation."));
goto error;
}
if ((ret = validate_key(hdl, keyformat, (const char *)km, kmlen)) != 0)
goto error;
*km_out = km;
*kmlen_out = kmlen;
if (can_retry_out != NULL)
*can_retry_out = can_retry;
free(uri_scheme);
return (0);
error:
free(km);
*km_out = NULL;
*kmlen_out = 0;
if (can_retry_out != NULL)
*can_retry_out = can_retry;
free(uri_scheme);
return (ret);
}
static int
derive_key(libzfs_handle_t *hdl, zfs_keyformat_t format, uint64_t iters,
uint8_t *key_material, size_t key_material_len, uint64_t salt,
uint8_t **key_out)
{
int ret;
uint8_t *key;
*key_out = NULL;
key = zfs_alloc(hdl, WRAPPING_KEY_LEN);
if (!key)
return (ENOMEM);
switch (format) {
case ZFS_KEYFORMAT_RAW:
bcopy(key_material, key, WRAPPING_KEY_LEN);
break;
case ZFS_KEYFORMAT_HEX:
ret = hex_key_to_raw((char *)key_material,
WRAPPING_KEY_LEN * 2, key);
if (ret != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Invalid hex key provided."));
goto error;
}
break;
case ZFS_KEYFORMAT_PASSPHRASE:
salt = LE_64(salt);
ret = PKCS5_PBKDF2_HMAC_SHA1((char *)key_material,
strlen((char *)key_material), ((uint8_t *)&salt),
sizeof (uint64_t), iters, WRAPPING_KEY_LEN, key);
if (ret != 1) {
ret = EIO;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to generate key from passphrase."));
goto error;
}
break;
default:
ret = EINVAL;
goto error;
}
*key_out = key;
return (0);
error:
free(key);
*key_out = NULL;
return (ret);
}
static boolean_t
encryption_feature_is_enabled(zpool_handle_t *zph)
{
nvlist_t *features;
uint64_t feat_refcount;
/* check that features can be enabled */
if (zpool_get_prop_int(zph, ZPOOL_PROP_VERSION, NULL)
< SPA_VERSION_FEATURES)
return (B_FALSE);
/* check for crypto feature */
features = zpool_get_features(zph);
if (!features || nvlist_lookup_uint64(features,
spa_feature_table[SPA_FEATURE_ENCRYPTION].fi_guid,
&feat_refcount) != 0)
return (B_FALSE);
return (B_TRUE);
}
static int
populate_create_encryption_params_nvlists(libzfs_handle_t *hdl,
zfs_handle_t *zhp, boolean_t newkey, zfs_keyformat_t keyformat,
char *keylocation, nvlist_t *props, uint8_t **wkeydata, uint_t *wkeylen)
{
int ret;
uint64_t iters = 0, salt = 0;
uint8_t *key_material = NULL;
size_t key_material_len = 0;
uint8_t *key_data = NULL;
const char *fsname = (zhp) ? zfs_get_name(zhp) : NULL;
/* get key material from keyformat and keylocation */
ret = get_key_material(hdl, B_TRUE, newkey, keyformat, keylocation,
fsname, &key_material, &key_material_len, NULL);
if (ret != 0)
goto error;
/* passphrase formats require a salt and pbkdf2 iters property */
if (keyformat == ZFS_KEYFORMAT_PASSPHRASE) {
/* always generate a new salt */
ret = pkcs11_get_urandom((uint8_t *)&salt, sizeof (uint64_t));
if (ret != sizeof (uint64_t)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to generate salt."));
goto error;
}
ret = nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt);
if (ret != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to add salt to properties."));
goto error;
}
/*
* If not otherwise specified, use the default number of
* pbkdf2 iterations. If specified, we have already checked
* that the given value is greater than MIN_PBKDF2_ITERATIONS
* during zfs_valid_proplist().
*/
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &iters);
if (ret == ENOENT) {
iters = DEFAULT_PBKDF2_ITERATIONS;
ret = nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), iters);
if (ret != 0)
goto error;
} else if (ret != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to get pbkdf2 iterations."));
goto error;
}
} else {
/* check that pbkdf2iters was not specified by the user */
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &iters);
if (ret == 0) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Cannot specify pbkdf2iters with a non-passphrase "
"keyformat."));
goto error;
}
}
/* derive a key from the key material */
ret = derive_key(hdl, keyformat, iters, key_material, key_material_len,
salt, &key_data);
if (ret != 0)
goto error;
free(key_material);
*wkeydata = key_data;
*wkeylen = WRAPPING_KEY_LEN;
return (0);
error:
if (key_material != NULL)
free(key_material);
if (key_data != NULL)
free(key_data);
*wkeydata = NULL;
*wkeylen = 0;
return (ret);
}
static boolean_t
proplist_has_encryption_props(nvlist_t *props)
{
int ret;
uint64_t intval;
char *strval;
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &intval);
if (ret == 0 && intval != ZIO_CRYPT_OFF)
return (B_TRUE);
ret = nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &strval);
if (ret == 0 && strcmp(strval, "none") != 0)
return (B_TRUE);
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &intval);
if (ret == 0)
return (B_TRUE);
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &intval);
if (ret == 0)
return (B_TRUE);
return (B_FALSE);
}
int
zfs_crypto_get_encryption_root(zfs_handle_t *zhp, boolean_t *is_encroot,
char *buf)
{
int ret;
char prop_encroot[MAXNAMELEN];
/* if the dataset isn't encrypted, just return */
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) == ZIO_CRYPT_OFF) {
*is_encroot = B_FALSE;
if (buf != NULL)
buf[0] = '\0';
return (0);
}
ret = zfs_prop_get(zhp, ZFS_PROP_ENCRYPTION_ROOT, prop_encroot,
sizeof (prop_encroot), NULL, NULL, 0, B_TRUE);
if (ret != 0) {
*is_encroot = B_FALSE;
if (buf != NULL)
buf[0] = '\0';
return (ret);
}
*is_encroot = strcmp(prop_encroot, zfs_get_name(zhp)) == 0;
if (buf != NULL)
strcpy(buf, prop_encroot);
return (0);
}
int
zfs_crypto_create(libzfs_handle_t *hdl, char *parent_name, nvlist_t *props,
nvlist_t *pool_props, boolean_t stdin_available, uint8_t **wkeydata_out,
uint_t *wkeylen_out)
{
int ret;
char errbuf[1024];
uint64_t crypt = ZIO_CRYPT_INHERIT, pcrypt = ZIO_CRYPT_INHERIT;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
char *keylocation = NULL;
zfs_handle_t *pzhp = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
boolean_t local_crypt = B_TRUE;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Encryption create error"));
/* lookup crypt from props */
ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &crypt);
if (ret != 0)
local_crypt = B_FALSE;
/* lookup key location and format from props */
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &keyformat);
(void) nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (parent_name != NULL) {
/* get a reference to parent dataset */
pzhp = make_dataset_handle(hdl, parent_name);
if (pzhp == NULL) {
ret = ENOENT;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to lookup parent."));
goto out;
}
/* Lookup parent's crypt */
pcrypt = zfs_prop_get_int(pzhp, ZFS_PROP_ENCRYPTION);
/* Params require the encryption feature */
if (!encryption_feature_is_enabled(pzhp->zpool_hdl)) {
if (proplist_has_encryption_props(props)) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
goto out;
}
ret = 0;
goto out;
}
} else {
/*
* special case for root dataset where the encryption feature
* won't be on disk yet
*/
if (!nvlist_exists(pool_props, "feature@encryption")) {
if (proplist_has_encryption_props(props)) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
goto out;
}
ret = 0;
goto out;
}
pcrypt = ZIO_CRYPT_OFF;
}
/* Get the inherited encryption property if we don't have it locally */
if (!local_crypt)
crypt = pcrypt;
/*
* At this point crypt should be the actual encryption value. If
* encryption is off just verify that no encryption properties have
* been specified and return.
*/
if (crypt == ZIO_CRYPT_OFF) {
if (proplist_has_encryption_props(props)) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption must be turned on to set encryption "
"properties."));
goto out;
}
ret = 0;
goto out;
}
/*
* If we have a parent crypt it is valid to specify encryption alone.
* This will result in a child that is encrypted with the chosen
* encryption suite that will also inherit the parent's key. If
* the parent is not encrypted we need an encryption suite provided.
*/
if (pcrypt == ZIO_CRYPT_OFF && keylocation == NULL &&
keyformat == ZFS_KEYFORMAT_NONE) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Keyformat required for new encryption root."));
goto out;
}
/*
* Specifying a keylocation implies this will be a new encryption root.
* Check that a keyformat is also specified.
*/
if (keylocation != NULL && keyformat == ZFS_KEYFORMAT_NONE) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Keyformat required for new encryption root."));
goto out;
}
/* default to prompt if no keylocation is specified */
if (keyformat != ZFS_KEYFORMAT_NONE && keylocation == NULL) {
keylocation = "prompt";
ret = nvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), keylocation);
if (ret != 0)
goto out;
}
/*
* If a local key is provided, this dataset will be a new
* encryption root. Populate the encryption params.
*/
if (keylocation != NULL) {
/*
* 'zfs recv -o keylocation=prompt' won't work because stdin
* is being used by the send stream, so we disallow it.
*/
if (!stdin_available && strcmp(keylocation, "prompt") == 0) {
ret = EINVAL;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Cannot use "
"'prompt' keylocation because stdin is in use."));
goto out;
}
ret = populate_create_encryption_params_nvlists(hdl, NULL,
B_TRUE, keyformat, keylocation, props, &wkeydata,
&wkeylen);
if (ret != 0)
goto out;
}
if (pzhp != NULL)
zfs_close(pzhp);
*wkeydata_out = wkeydata;
*wkeylen_out = wkeylen;
return (0);
out:
if (pzhp != NULL)
zfs_close(pzhp);
if (wkeydata != NULL)
free(wkeydata);
*wkeydata_out = NULL;
*wkeylen_out = 0;
return (ret);
}
int
zfs_crypto_clone_check(libzfs_handle_t *hdl, zfs_handle_t *origin_zhp,
char *parent_name, nvlist_t *props)
{
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Encryption clone error"));
/*
* No encryption properties should be specified. They will all be
* inherited from the origin dataset.
*/
if (nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_KEYFORMAT)) ||
nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_KEYLOCATION)) ||
nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_ENCRYPTION)) ||
nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Encryption properties must inherit from origin dataset."));
return (EINVAL);
}
return (0);
}
typedef struct loadkeys_cbdata {
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
static int
load_keys_cb(zfs_handle_t *zhp, void *arg)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = arg;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/* only attempt to load keys for encryption roots */
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0 || !is_encroot)
goto out;
/* don't attempt to load already loaded keys */
if (keystatus == ZFS_KEYSTATUS_AVAILABLE)
goto out;
/* Attempt to load the key. Record status in cb. */
cb->cb_numattempted++;
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret)
cb->cb_numfailed++;
out:
(void) zfs_iter_filesystems(zhp, load_keys_cb, cb);
zfs_close(zhp);
/* always return 0, since this function is best effort */
return (0);
}
/*
* This function is best effort. It attempts to load all the keys for the given
* filesystem and all of its children.
*/
int
zfs_crypto_attempt_load_keys(libzfs_handle_t *hdl, char *fsname)
{
int ret;
zfs_handle_t *zhp = NULL;
loadkey_cbdata_t cb = { 0 };
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
ret = ENOENT;
goto error;
}
ret = load_keys_cb(zfs_handle_dup(zhp), &cb);
if (ret)
goto error;
(void) printf(gettext("%llu / %llu keys successfully loaded\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted);
if (cb.cb_numfailed != 0) {
ret = -1;
goto error;
}
zfs_close(zhp);
return (0);
error:
if (zhp != NULL)
zfs_close(zhp);
return (ret);
}
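Since this is a public libzfs entry point, a caller only needs an open handle and the top of the tree to walk; the summary line and the -1 return on any failure come from the code above. A minimal hypothetical use, assuming the prototype exported through libzfs.h matches the definition here and with an invented pool name:

#include <libzfs.h>
#include <stdio.h>

static void
load_all_keys_example(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	char fsname[] = "tank";	/* hypothetical pool name */

	if (hdl == NULL)
		return;
	/*
	 * Walks fsname and all descendants, attempting to load the key of
	 * every encryption root whose key is not already available.
	 */
	if (zfs_crypto_attempt_load_keys(hdl, fsname) != 0)
		(void) fprintf(stderr, "some keys failed to load\n");
	libzfs_fini(hdl);
}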
int
zfs_crypto_load_key(zfs_handle_t *zhp, boolean_t noop, char *alt_keylocation)
{
int ret, attempts = 0;
char errbuf[1024];
uint64_t keystatus, iters = 0, salt = 0;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
char prop_keylocation[MAXNAMELEN];
char prop_encroot[MAXNAMELEN];
char *keylocation = NULL;
uint8_t *key_material = NULL, *key_data = NULL;
size_t key_material_len;
boolean_t is_encroot, can_retry = B_FALSE, correctible = B_FALSE;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Key load error"));
/* check that encryption is enabled for the pool */
if (!encryption_feature_is_enabled(zhp->zpool_hdl)) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
ret = EINVAL;
goto error;
}
/* Fetch the keyformat. Check that the dataset is encrypted. */
keyformat = zfs_prop_get_int(zhp, ZFS_PROP_KEYFORMAT);
if (keyformat == ZFS_KEYFORMAT_NONE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not encrypted."), zfs_get_name(zhp));
ret = EINVAL;
goto error;
}
/*
* Fetch the key location. Check that we are working with an
* encryption root.
*/
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, prop_encroot);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for '%s'."),
zfs_get_name(zhp));
goto error;
} else if (!is_encroot) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Keys must be loaded for encryption root of '%s' (%s)."),
zfs_get_name(zhp), prop_encroot);
ret = EINVAL;
goto error;
}
/*
* If the caller has elected to override the keylocation property,
* use that instead.
*/
if (alt_keylocation != NULL) {
keylocation = alt_keylocation;
} else {
ret = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION, prop_keylocation,
sizeof (prop_keylocation), NULL, NULL, 0, B_TRUE);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get keylocation for '%s'."),
zfs_get_name(zhp));
goto error;
}
keylocation = prop_keylocation;
}
/* check that the key is unloaded unless this is a noop */
if (!noop) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus == ZFS_KEYSTATUS_AVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already loaded for '%s'."), zfs_get_name(zhp));
ret = EEXIST;
goto error;
}
}
/* passphrase formats require a salt and pbkdf2_iters property */
if (keyformat == ZFS_KEYFORMAT_PASSPHRASE) {
salt = zfs_prop_get_int(zhp, ZFS_PROP_PBKDF2_SALT);
iters = zfs_prop_get_int(zhp, ZFS_PROP_PBKDF2_ITERS);
}
try_again:
/* fetching and deriving the key are correctable errors. set the flag */
correctible = B_TRUE;
/* get key material from key format and location */
ret = get_key_material(zhp->zfs_hdl, B_FALSE, B_FALSE, keyformat,
keylocation, zfs_get_name(zhp), &key_material, &key_material_len,
&can_retry);
if (ret != 0)
goto error;
/* derive a key from the key material */
ret = derive_key(zhp->zfs_hdl, keyformat, iters, key_material,
key_material_len, salt, &key_data);
if (ret != 0)
goto error;
correctible = B_FALSE;
/* pass the wrapping key and noop flag to the ioctl */
ret = lzc_load_key(zhp->zfs_name, noop, key_data, WRAPPING_KEY_LEN);
if (ret != 0) {
switch (ret) {
case EPERM:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
case EINVAL:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Invalid parameters provided for dataset %s."),
zfs_get_name(zhp));
break;
case EEXIST:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already loaded for '%s'."), zfs_get_name(zhp));
break;
case EBUSY:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is busy."), zfs_get_name(zhp));
break;
case EACCES:
correctible = B_TRUE;
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Incorrect key provided for '%s'."),
zfs_get_name(zhp));
break;
}
goto error;
}
free(key_material);
free(key_data);
return (0);
error:
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
if (key_material != NULL) {
free(key_material);
key_material = NULL;
}
if (key_data != NULL) {
free(key_data);
key_data = NULL;
}
/*
* Here we decide if it is ok to allow the user to retry entering their
* key. The can_retry flag will be set if the user is entering their
* key from an interactive prompt. The correctible flag will only be
* set if an error that occurred could be corrected by retrying. Both
* flags are needed to allow the user to attempt key entry again.
*/
attempts++;
if (can_retry && correctible && attempts < MAX_KEY_PROMPT_ATTEMPTS)
goto try_again;
return (ret);
}
int
zfs_crypto_unload_key(zfs_handle_t *zhp)
{
int ret;
char errbuf[1024];
char prop_encroot[MAXNAMELEN];
uint64_t keystatus, keyformat;
boolean_t is_encroot;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Key unload error"));
/* check that encryption is enabled for the pool */
if (!encryption_feature_is_enabled(zhp->zpool_hdl)) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
ret = EINVAL;
goto error;
}
/* Fetch the keyformat. Check that the dataset is encrypted. */
keyformat = zfs_prop_get_int(zhp, ZFS_PROP_KEYFORMAT);
if (keyformat == ZFS_KEYFORMAT_NONE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not encrypted."), zfs_get_name(zhp));
ret = EINVAL;
goto error;
}
/*
* Fetch the key location. Check that we are working with an
* encryption root.
*/
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, prop_encroot);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for '%s'."),
zfs_get_name(zhp));
goto error;
} else if (!is_encroot) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Keys must be unloaded for encryption root of '%s' (%s)."),
zfs_get_name(zhp), prop_encroot);
ret = EINVAL;
goto error;
}
/* check that the key is loaded */
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already unloaded for '%s'."), zfs_get_name(zhp));
ret = EACCES;
goto error;
}
/* call the ioctl */
ret = lzc_unload_key(zhp->zfs_name);
if (ret != 0) {
switch (ret) {
case EPERM:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
case EACCES:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key already unloaded for '%s'."),
zfs_get_name(zhp));
break;
case EBUSY:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is busy."), zfs_get_name(zhp));
break;
}
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
}
return (ret);
error:
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
return (ret);
}
static int
zfs_crypto_verify_rewrap_nvlist(zfs_handle_t *zhp, nvlist_t *props,
nvlist_t **props_out, char *errbuf)
{
int ret;
nvpair_t *elem = NULL;
zfs_prop_t prop;
nvlist_t *new_props = NULL;
new_props = fnvlist_alloc();
/*
* Loop through all provided properties; we should only have
* keyformat, keylocation and pbkdf2iters. The actual validation of
* values is done by zfs_valid_proplist().
*/
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
switch (prop) {
case ZFS_PROP_PBKDF2_ITERS:
case ZFS_PROP_KEYFORMAT:
case ZFS_PROP_KEYLOCATION:
break;
default:
ret = EINVAL;
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Only keyformat, keylocation and pbkdf2iters may "
"be set with this command."));
goto error;
}
}
new_props = zfs_valid_proplist(zhp->zfs_hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), NULL, zhp->zpool_hdl,
B_TRUE, errbuf);
if (new_props == NULL) {
ret = EINVAL;
goto error;
}
*props_out = new_props;
return (0);
error:
nvlist_free(new_props);
*props_out = NULL;
return (ret);
}
int
zfs_crypto_rewrap(zfs_handle_t *zhp, nvlist_t *raw_props, boolean_t inheritkey)
{
int ret;
char errbuf[1024];
boolean_t is_encroot;
nvlist_t *props = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
dcp_cmd_t cmd = (inheritkey) ? DCP_CMD_INHERIT : DCP_CMD_NEW_KEY;
uint64_t crypt, pcrypt, keystatus, pkeystatus;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
zfs_handle_t *pzhp = NULL;
char *keylocation = NULL;
char origin_name[MAXNAMELEN];
char prop_keylocation[MAXNAMELEN];
char parent_name[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "Key change error"));
/* check that encryption is enabled for the pool */
if (!encryption_feature_is_enabled(zhp->zpool_hdl)) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Encryption feature not enabled."));
ret = EINVAL;
goto error;
}
/* get crypt from dataset */
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Dataset not encrypted."));
ret = EINVAL;
goto error;
}
/* get the encryption root of the dataset */
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for '%s'."),
zfs_get_name(zhp));
goto error;
}
/* Clones use their origin's key and cannot rewrap it */
ret = zfs_prop_get(zhp, ZFS_PROP_ORIGIN, origin_name,
sizeof (origin_name), NULL, NULL, 0, B_TRUE);
if (ret == 0 && strcmp(origin_name, "") != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Keys cannot be changed on clones."));
ret = EINVAL;
goto error;
}
/*
* If the user wants to use the inheritkey variant of this function
* we don't need to collect any crypto arguments.
*/
if (!inheritkey) {
/* validate the provided properties */
ret = zfs_crypto_verify_rewrap_nvlist(zhp, raw_props, &props,
errbuf);
if (ret != 0)
goto error;
/*
* Load keyformat and keylocation from the nvlist. Fetch from
* the dataset properties if not specified.
*/
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &keyformat);
(void) nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (is_encroot) {
/*
* If this is already an encryption root, just keep
* any properties not set by the user.
*/
if (keyformat == ZFS_KEYFORMAT_NONE) {
keyformat = zfs_prop_get_int(zhp,
ZFS_PROP_KEYFORMAT);
ret = nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
keyformat);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl,
dgettext(TEXT_DOMAIN, "Failed to "
"get existing keyformat "
"property."));
goto error;
}
}
if (keylocation == NULL) {
ret = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
prop_keylocation, sizeof (prop_keylocation),
NULL, NULL, 0, B_TRUE);
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl,
dgettext(TEXT_DOMAIN, "Failed to "
"get existing keylocation "
"property."));
goto error;
}
keylocation = prop_keylocation;
}
} else {
/* need a new key for non-encryption roots */
if (keyformat == ZFS_KEYFORMAT_NONE) {
ret = EINVAL;
zfs_error_aux(zhp->zfs_hdl,
dgettext(TEXT_DOMAIN, "Keyformat required "
"for new encryption root."));
goto error;
}
/* default to prompt if no keylocation is specified */
if (keylocation == NULL) {
keylocation = "prompt";
ret = nvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
keylocation);
if (ret != 0)
goto error;
}
}
/* fetch the new wrapping key and associated properties */
ret = populate_create_encryption_params_nvlists(zhp->zfs_hdl,
zhp, B_TRUE, keyformat, keylocation, props, &wkeydata,
&wkeylen);
if (ret != 0)
goto error;
} else {
/* check that zhp is an encryption root */
if (!is_encroot) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key inheritting can only be performed on "
"encryption roots."));
ret = EINVAL;
goto error;
}
/* get the parent's name */
ret = zfs_parent_name(zhp, parent_name, sizeof (parent_name));
if (ret != 0) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Root dataset cannot inherit key."));
ret = EINVAL;
goto error;
}
/* get a handle to the parent */
pzhp = make_dataset_handle(zhp->zfs_hdl, parent_name);
if (pzhp == NULL) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Failed to lookup parent."));
ret = ENOENT;
goto error;
}
/* parent must be encrypted */
pcrypt = zfs_prop_get_int(pzhp, ZFS_PROP_ENCRYPTION);
if (pcrypt == ZIO_CRYPT_OFF) {
zfs_error_aux(pzhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Parent must be encrypted."));
ret = EINVAL;
goto error;
}
/* check that the parent's key is loaded */
pkeystatus = zfs_prop_get_int(pzhp, ZFS_PROP_KEYSTATUS);
if (pkeystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(pzhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Parent key must be loaded."));
ret = EACCES;
goto error;
}
}
/* check that the key is loaded */
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key must be loaded."));
ret = EACCES;
goto error;
}
/* call the ioctl */
ret = lzc_change_key(zhp->zfs_name, cmd, props, wkeydata, wkeylen);
if (ret != 0) {
switch (ret) {
case EPERM:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Permission denied."));
break;
case EINVAL:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Invalid properties for key change."));
break;
case EACCES:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"Key is not currently loaded."));
break;
}
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
}
if (pzhp != NULL)
zfs_close(pzhp);
if (props != NULL)
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
error:
if (pzhp != NULL)
zfs_close(pzhp);
if (props != NULL)
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
zfs_error(zhp->zfs_hdl, EZFS_CRYPTOFAILED, errbuf);
return (ret);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index 86ff8c91a91e..b67c9b30c84a 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -1,5202 +1,5202 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019 Datto Inc.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/avl.h>
#include <sys/debug.h>
#include <sys/stat.h>
#include <pthread.h>
#include <umem.h>
#include <time.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include <cityhash.h>
#include <zlib.h>
#include <sys/zio_checksum.h>
#include <sys/dsl_crypt.h>
#include <sys/ddt.h>
#include <sys/socket.h>
#include <sys/sha2.h>
static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **,
const char *, nvlist_t *);
static int guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name);
static int guid_to_name(libzfs_handle_t *, const char *,
uint64_t, boolean_t, char *);
typedef struct progress_arg {
zfs_handle_t *pa_zhp;
int pa_fd;
boolean_t pa_parsable;
boolean_t pa_estimate;
int pa_verbosity;
} progress_arg_t;
static int
dump_record(dmu_replay_record_t *drr, void *payload, int payload_len,
zio_cksum_t *zc, int outfd)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
fletcher_4_incremental_native(drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
if (drr->drr_type != DRR_BEGIN) {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
drr_checksum.drr_checksum));
drr->drr_u.drr_checksum.drr_checksum = *zc;
}
fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), zc);
if (write(outfd, drr, sizeof (*drr)) == -1)
return (errno);
if (payload_len != 0) {
fletcher_4_incremental_native(payload, payload_len, zc);
if (write(outfd, payload, payload_len) == -1)
return (errno);
}
return (0);
}
/*
* Routines for dealing with the AVL tree of fs-nvlists
*/
typedef struct fsavl_node {
avl_node_t fn_node;
nvlist_t *fn_nvfs;
char *fn_snapname;
uint64_t fn_guid;
} fsavl_node_t;
static int
fsavl_compare(const void *arg1, const void *arg2)
{
const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1;
const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2;
return (TREE_CMP(fn1->fn_guid, fn2->fn_guid));
}
/*
* Given the GUID of a snapshot, find its containing filesystem and
* (optionally) name.
*/
static nvlist_t *
fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname)
{
fsavl_node_t fn_find;
fsavl_node_t *fn;
fn_find.fn_guid = snapguid;
fn = avl_find(avl, &fn_find, NULL);
if (fn) {
if (snapname)
*snapname = fn->fn_snapname;
return (fn->fn_nvfs);
}
return (NULL);
}
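/*
 * Usage sketch for fsavl_find(): resolve a (placeholder) snapshot GUID to
 * the short snapshot name stored in the AVL index built by fsavl_create()
 * below.  This is illustrative only, not part of the library interface.
 */
static const char *
example_fsavl_lookup(avl_tree_t *fsavl, uint64_t guid)
{
	char *snapname = NULL;
	if (fsavl_find(fsavl, guid, &snapname) != NULL)
		return (snapname);	/* short name of the matching snapshot */
	return (NULL);
}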
static void
fsavl_destroy(avl_tree_t *avl)
{
fsavl_node_t *fn;
void *cookie;
if (avl == NULL)
return;
cookie = NULL;
while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
free(fn);
avl_destroy(avl);
free(avl);
}
/*
* Given an nvlist, produce an avl tree of snapshots, ordered by guid
*/
static avl_tree_t *
fsavl_create(nvlist_t *fss)
{
avl_tree_t *fsavl;
nvpair_t *fselem = NULL;
if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
return (NULL);
avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
offsetof(fsavl_node_t, fn_node));
while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
nvlist_t *nvfs, *snaps;
nvpair_t *snapelem = NULL;
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
while ((snapelem =
nvlist_next_nvpair(snaps, snapelem)) != NULL) {
fsavl_node_t *fn;
uint64_t guid;
guid = fnvpair_value_uint64(snapelem);
if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
fsavl_destroy(fsavl);
return (NULL);
}
fn->fn_nvfs = nvfs;
fn->fn_snapname = nvpair_name(snapelem);
fn->fn_guid = guid;
/*
* Note: if there are multiple snaps with the
* same GUID, we ignore all but one.
*/
- if (avl_find(fsavl, fn, NULL) == NULL)
- avl_add(fsavl, fn);
+ avl_index_t where = 0;
+ if (avl_find(fsavl, fn, &where) == NULL)
+ avl_insert(fsavl, fn, where);
else
free(fn);
}
}
return (fsavl);
}
/*
* Routines for dealing with the giant nvlist of fs-nvlists, etc.
*/
typedef struct send_data {
/*
* assigned inside every recursive call,
* restored from *_save on return:
*
* guid of fromsnap snapshot in parent dataset
* txg of fromsnap snapshot in current dataset
* txg of tosnap snapshot in current dataset
*/
uint64_t parent_fromsnap_guid;
uint64_t fromsnap_txg;
uint64_t tosnap_txg;
/* the nvlists get accumulated during depth-first traversal */
nvlist_t *parent_snaps;
nvlist_t *fss;
nvlist_t *snapprops;
nvlist_t *snapholds; /* user holds */
/* send-receive configuration, does not change during traversal */
const char *fsname;
const char *fromsnap;
const char *tosnap;
boolean_t recursive;
boolean_t raw;
boolean_t doall;
boolean_t replicate;
boolean_t skipmissing;
boolean_t verbose;
boolean_t backup;
boolean_t seenfrom;
boolean_t seento;
boolean_t holds; /* were holds requested with send -h */
boolean_t props;
/*
* The header nvlist is of the following format:
* {
* "tosnap" -> string
* "fromsnap" -> string (if incremental)
* "fss" -> {
* id -> {
*
* "name" -> string (full name; for debugging)
* "parentfromsnap" -> number (guid of fromsnap in parent)
*
* "props" -> { name -> value (only if set here) }
* "snaps" -> { name (lastname) -> number (guid) }
* "snapprops" -> { name (lastname) -> { name -> value } }
* "snapholds" -> { name (lastname) -> { holdname -> crtime } }
*
* "origin" -> number (guid) (if clone)
* "is_encroot" -> boolean
* "sent" -> boolean (not on-disk)
* }
* }
* }
*
*/
} send_data_t;
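/*
 * For illustration, a minimal header nvlist matching the layout documented
 * above could be assembled with the fnvlist routines as shown below.  The
 * dataset name, snapshot name, and GUID are placeholders, not values taken
 * from an actual pool.
 */
static nvlist_t *
example_send_header(void)
{
	nvlist_t *hdrnv = fnvlist_alloc();
	nvlist_t *fss = fnvlist_alloc();
	nvlist_t *fs = fnvlist_alloc();
	nvlist_t *snaps = fnvlist_alloc();
	fnvlist_add_string(hdrnv, "tosnap", "today");
	fnvlist_add_string(fs, "name", "pool/fs");
	fnvlist_add_uint64(fs, "parentfromsnap", 0);
	fnvlist_add_uint64(snaps, "today", 0x1234ULL);	/* lastname -> guid */
	fnvlist_add_nvlist(fs, "snaps", snaps);
	fnvlist_add_nvlist(fss, "0x1234", fs);		/* keyed by guid string */
	fnvlist_add_nvlist(hdrnv, "fss", fss);
	fnvlist_free(snaps);
	fnvlist_free(fs);
	fnvlist_free(fss);
	return (hdrnv);
}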
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);
static int
send_iterate_snap(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
char *snapname;
nvlist_t *nv;
boolean_t isfromsnap, istosnap, istosnapwithnofrom;
snapname = strrchr(zhp->zfs_name, '@')+1;
isfromsnap = (sd->fromsnap != NULL &&
strcmp(sd->fromsnap, snapname) == 0);
istosnap = (sd->tosnap != NULL && (strcmp(sd->tosnap, snapname) == 0));
istosnapwithnofrom = (istosnap && sd->fromsnap == NULL);
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping snapshot %s because it was created "
"after the destination snapshot (%s)\n"),
zhp->zfs_name, sd->tosnap);
}
zfs_close(zhp);
return (0);
}
fnvlist_add_uint64(sd->parent_snaps, snapname, guid);
/*
* NB: if there is no fromsnap here (it's a newly created fs in
* an incremental replication), we will substitute the tosnap.
*/
if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap)) {
sd->parent_fromsnap_guid = guid;
}
if (!sd->recursive) {
/*
* To allow a doall stream to work properly
* with a NULL fromsnap
*/
if (sd->doall && sd->fromsnap == NULL && !sd->seenfrom) {
sd->seenfrom = B_TRUE;
}
if (!sd->seenfrom && isfromsnap) {
sd->seenfrom = B_TRUE;
zfs_close(zhp);
return (0);
}
if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) {
zfs_close(zhp);
return (0);
}
if (istosnap)
sd->seento = B_TRUE;
}
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(sd->snapprops, snapname, nv);
fnvlist_free(nv);
if (sd->holds) {
- nvlist_t *holds = fnvlist_alloc();
- int err = lzc_get_holds(zhp->zfs_name, &holds);
- if (err == 0) {
+ nvlist_t *holds;
+ if (lzc_get_holds(zhp->zfs_name, &holds) == 0) {
fnvlist_add_nvlist(sd->snapholds, snapname, holds);
+ fnvlist_free(holds);
}
- fnvlist_free(holds);
}
zfs_close(zhp);
return (0);
}
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv)
{
nvlist_t *props = NULL;
nvpair_t *elem = NULL;
if (received_only)
props = zfs_get_recvd_props(zhp);
else
props = zhp->zfs_props;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
char *propname = nvpair_name(elem);
zfs_prop_t prop = zfs_name_to_prop(propname);
nvlist_t *propnv;
if (!zfs_prop_user(propname)) {
/*
* Realistically, this should never happen. However,
* we want the ability to add DSL properties without
* needing to make incompatible version changes. We
* need to ignore unknown properties to allow older
* software to still send datasets containing these
* properties, with the unknown properties elided.
*/
if (prop == ZPROP_INVAL)
continue;
if (zfs_prop_readonly(prop))
continue;
}
verify(nvpair_value_nvlist(elem, &propnv) == 0);
if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
char *source;
uint64_t value;
verify(nvlist_lookup_uint64(propnv,
ZPROP_VALUE, &value) == 0);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
continue;
/*
* May have no source before SPA_VERSION_RECVD_PROPS,
* but is still modifiable.
*/
if (nvlist_lookup_string(propnv,
ZPROP_SOURCE, &source) == 0) {
if ((strcmp(source, zhp->zfs_name) != 0) &&
(strcmp(source,
ZPROP_SOURCE_VAL_RECVD) != 0))
continue;
}
} else {
char *source;
if (nvlist_lookup_string(propnv,
ZPROP_SOURCE, &source) != 0)
continue;
if ((strcmp(source, zhp->zfs_name) != 0) &&
(strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0))
continue;
}
if (zfs_prop_user(propname) ||
zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
char *value;
value = fnvlist_lookup_string(propnv, ZPROP_VALUE);
fnvlist_add_string(nv, propname, value);
} else {
uint64_t value;
value = fnvlist_lookup_uint64(propnv, ZPROP_VALUE);
fnvlist_add_uint64(nv, propname, value);
}
}
}
/*
 * Returns the snapshot creation txg, or 0 if the snapshot does not exist.
*/
static uint64_t
get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t txg = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (txg);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) {
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG);
zfs_close(zhp);
}
}
return (txg);
}
/*
* recursively generate nvlists describing datasets. See comment
* for the data structure send_data_t above for description of contents
* of the nvlist.
*/
static int
send_iterate_fs(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
nvlist_t *nvfs = NULL, *nv = NULL;
int rv = 0;
uint64_t min_txg = 0, max_txg = 0;
uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
uint64_t fromsnap_txg_save = sd->fromsnap_txg;
uint64_t tosnap_txg_save = sd->tosnap_txg;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t fromsnap_txg, tosnap_txg;
char guidstring[64];
fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap);
if (fromsnap_txg != 0)
sd->fromsnap_txg = fromsnap_txg;
tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap);
if (tosnap_txg != 0)
sd->tosnap_txg = tosnap_txg;
/*
* on the send side, if the current dataset does not have tosnap,
* perform two additional checks:
*
* - skip sending the current dataset if it was created later than
* the parent tosnap
* - return error if the current dataset was created earlier than
	 *   the parent tosnap, unless --skip-missing is specified, in
	 *   which case just print a warning
*/
if (sd->tosnap != NULL && tosnap_txg == 0) {
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping dataset %s: snapshot %s does "
"not exist\n"), zhp->zfs_name, sd->tosnap);
}
} else if (sd->skipmissing) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: skipping dataset %s and its children:"
" snapshot %s does not exist\n"),
zhp->zfs_name, sd->tosnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s%s: snapshot %s@%s does not "
"exist\n"), sd->fsname, sd->tosnap, sd->recursive ?
dgettext(TEXT_DOMAIN, " recursively") : "",
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOENT;
}
goto out;
}
nvfs = fnvlist_alloc();
fnvlist_add_string(nvfs, "name", zhp->zfs_name);
fnvlist_add_uint64(nvfs, "parentfromsnap",
sd->parent_fromsnap_guid);
if (zhp->zfs_dmustats.dds_origin[0]) {
zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
if (origin == NULL) {
rv = -1;
goto out;
}
fnvlist_add_uint64(nvfs, "origin",
origin->zfs_dmustats.dds_guid);
zfs_close(origin);
}
/* iterate over props */
if (sd->props || sd->backup || sd->recursive) {
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
boolean_t encroot;
/* determine if this dataset is an encryption root */
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) {
rv = -1;
goto out;
}
if (encroot)
fnvlist_add_boolean(nvfs, "is_encroot");
/*
* Encrypted datasets can only be sent with properties if
* the raw flag is specified because the receive side doesn't
* currently have a mechanism for recursively asking the user
* for new encryption parameters.
*/
if (!sd->raw) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s: encrypted dataset %s may not "
"be sent with properties without the raw flag\n"),
sd->fsname, sd->tosnap, zhp->zfs_name);
rv = -1;
goto out;
}
}
if (nv != NULL)
fnvlist_add_nvlist(nvfs, "props", nv);
/* iterate over snaps, and set sd->parent_fromsnap_guid */
sd->parent_fromsnap_guid = 0;
sd->parent_snaps = fnvlist_alloc();
sd->snapprops = fnvlist_alloc();
if (sd->holds)
sd->snapholds = fnvlist_alloc();
/*
* If this is a "doall" send, a replicate send or we're just trying
* to gather a list of previous snapshots, iterate through all the
* snaps in the txg range. Otherwise just look at the one we're
* interested in.
*/
if (sd->doall || sd->replicate || sd->tosnap == NULL) {
if (!sd->replicate && fromsnap_txg != 0)
min_txg = fromsnap_txg;
if (!sd->replicate && tosnap_txg != 0)
max_txg = tosnap_txg;
(void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
(void) snprintf(snapname, sizeof (snapname), "%s@%s",
zhp->zfs_name, sd->tosnap);
if (sd->fromsnap != NULL)
sd->seenfrom = B_TRUE;
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
(void) send_iterate_snap(snap, sd);
}
fnvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps);
fnvlist_add_nvlist(nvfs, "snapprops", sd->snapprops);
if (sd->holds)
fnvlist_add_nvlist(nvfs, "snapholds", sd->snapholds);
fnvlist_free(sd->parent_snaps);
fnvlist_free(sd->snapprops);
fnvlist_free(sd->snapholds);
/* Do not allow the size of the properties list to exceed the limit */
if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"warning: cannot send %s@%s: the size of the list of "
"snapshots and properties is too large to be received "
"successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOSPC;
goto out;
}
/* add this fs to nvlist */
(void) snprintf(guidstring, sizeof (guidstring),
"0x%llx", (longlong_t)guid);
fnvlist_add_nvlist(sd->fss, guidstring, nvfs);
/* iterate over children */
if (sd->recursive)
rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd);
out:
sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
sd->fromsnap_txg = fromsnap_txg_save;
sd->tosnap_txg = tosnap_txg_save;
fnvlist_free(nv);
fnvlist_free(nvfs);
zfs_close(zhp);
return (rv);
}
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t doall,
boolean_t replicate, boolean_t skipmissing, boolean_t verbose,
boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
avl_tree_t **avlp)
{
zfs_handle_t *zhp;
send_data_t sd = { 0 };
int error;
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (EZFS_BADTYPE);
sd.fss = fnvlist_alloc();
sd.fsname = fsname;
sd.fromsnap = fromsnap;
sd.tosnap = tosnap;
sd.recursive = recursive;
sd.raw = raw;
sd.doall = doall;
sd.replicate = replicate;
sd.skipmissing = skipmissing;
sd.verbose = verbose;
sd.backup = backup;
sd.holds = holds;
sd.props = props;
if ((error = send_iterate_fs(zhp, &sd)) != 0) {
fnvlist_free(sd.fss);
if (avlp != NULL)
*avlp = NULL;
*nvlp = NULL;
return (error);
}
if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
fnvlist_free(sd.fss);
*nvlp = NULL;
return (EZFS_NOMEM);
}
*nvlp = sd.fss;
return (0);
}
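/*
 * A sketch of how gather_nvlist() might be driven; the pool and snapshot
 * names are placeholders.  On success the caller owns the returned "fss"
 * nvlist and AVL index and must release them with fnvlist_free() and
 * fsavl_destroy().
 */
static int
example_gather_nvlist(libzfs_handle_t *hdl)
{
	nvlist_t *fss = NULL;
	avl_tree_t *fsavl = NULL;
	int err;
	err = gather_nvlist(hdl, "pool/fs", "snap1", "snap2",
	    B_TRUE /* recursive */, B_FALSE /* raw */, B_FALSE /* doall */,
	    B_TRUE /* replicate */, B_FALSE /* skipmissing */,
	    B_FALSE /* verbose */, B_FALSE /* backup */, B_FALSE /* holds */,
	    B_TRUE /* props */, &fss, &fsavl);
	if (err != 0)
		return (err);
	/* ... consume fss / fsavl ... */
	fsavl_destroy(fsavl);
	fnvlist_free(fss);
	return (0);
}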
/*
* Routines specific to "zfs send"
*/
typedef struct send_dump_data {
/* these are all just the short snapname (the part after the @) */
const char *fromsnap;
const char *tosnap;
char prevsnap[ZFS_MAX_DATASET_NAME_LEN];
uint64_t prevsnap_obj;
boolean_t seenfrom, seento, replicate, doall, fromorigin;
boolean_t dryrun, parsable, progress, embed_data, std_out;
boolean_t large_block, compress, raw, holds;
int outfd;
boolean_t err;
nvlist_t *fss;
nvlist_t *snapholds;
avl_tree_t *fsavl;
snapfilter_cb_t *filter_cb;
void *filter_cb_arg;
nvlist_t *debugnv;
char holdtag[ZFS_MAX_DATASET_NAME_LEN];
int cleanup_fd;
int verbosity;
uint64_t size;
} send_dump_data_t;
static int
zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from,
enum lzc_send_flags flags, uint64_t *spacep)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
int error;
assert(snapname != NULL);
error = lzc_send_space(snapname, from, flags, spacep);
if (error != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot estimate space for '%s'"), snapname);
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, snapname,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
snapname);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(error));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, error, errbuf));
}
}
return (0);
}
/*
* Dumps a backup of the given snapshot (incremental from fromsnap if it's not
* NULL) to the file descriptor specified by outfd.
*/
static int
dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
boolean_t fromorigin, int outfd, enum lzc_send_flags flags,
nvlist_t *debugnv)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *thisdbg;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
assert(fromsnap_obj == 0 || !fromorigin);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = outfd;
zc.zc_obj = fromorigin;
zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zc.zc_fromobj = fromsnap_obj;
zc.zc_flags = flags;
thisdbg = fnvlist_alloc();
if (fromsnap && fromsnap[0] != '\0') {
fnvlist_add_string(thisdbg, "fromsnap", fromsnap);
}
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
fnvlist_add_uint64(thisdbg, "error", errno);
if (debugnv) {
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
}
fnvlist_free(thisdbg);
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, zc.zc_name,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (@%s) does not exist"),
zc.zc_value);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
if (debugnv)
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
return (0);
}
static void
gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
/*
* zfs_send() only sets snapholds for sends that need them,
* e.g. replication and doall.
*/
if (sdd->snapholds == NULL)
return;
fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag);
}
int
zfs_send_progress(zfs_handle_t *zhp, int fd, uint64_t *bytes_written,
uint64_t *blocks_visited)
{
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = fd;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
return (errno);
if (bytes_written != NULL)
*bytes_written = zc.zc_cookie;
if (blocks_visited != NULL)
*blocks_visited = zc.zc_objset_type;
return (0);
}
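/*
 * Minimal polling sketch for zfs_send_progress(); "fd" is assumed to be the
 * file descriptor an in-flight send is writing to, as in the progress thread
 * below.
 */
static void
example_poll_send_progress(zfs_handle_t *zhp, int fd)
{
	uint64_t bytes = 0, blocks = 0;
	if (zfs_send_progress(zhp, fd, &bytes, &blocks) == 0) {
		(void) fprintf(stderr, "%llu bytes, %llu blocks\n",
		    (u_longlong_t)bytes, (u_longlong_t)blocks);
	}
}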
static void *
send_progress_thread(void *arg)
{
progress_arg_t *pa = arg;
zfs_handle_t *zhp = pa->pa_zhp;
uint64_t bytes;
uint64_t blocks;
char buf[16];
time_t t;
struct tm *tm;
boolean_t firstloop = B_TRUE;
/*
* Print the progress from ZFS_IOC_SEND_PROGRESS every second.
*/
for (;;) {
int err;
(void) sleep(1);
if ((err = zfs_send_progress(zhp, pa->pa_fd, &bytes,
&blocks)) != 0) {
if (err == EINTR || err == ENOENT)
return ((void *)0);
return ((void *)(uintptr_t)err);
}
if (firstloop && !pa->pa_parsable) {
(void) fprintf(stderr,
"TIME %s %sSNAPSHOT %s\n",
pa->pa_estimate ? "BYTES" : " SENT",
pa->pa_verbosity >= 2 ? " BLOCKS " : "",
zhp->zfs_name);
firstloop = B_FALSE;
}
(void) time(&t);
tm = localtime(&t);
if (pa->pa_verbosity >= 2 && pa->pa_parsable) {
(void) fprintf(stderr,
"%02d:%02d:%02d\t%llu\t%llu\t%s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
(u_longlong_t)bytes, (u_longlong_t)blocks,
zhp->zfs_name);
} else if (pa->pa_verbosity >= 2) {
zfs_nicenum(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"%02d:%02d:%02d %5s %8llu %s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
buf, (u_longlong_t)blocks, zhp->zfs_name);
} else if (pa->pa_parsable) {
(void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
(u_longlong_t)bytes, zhp->zfs_name);
} else {
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
buf, zhp->zfs_name);
}
}
}
static void
send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
uint64_t size, boolean_t parsable)
{
if (parsable) {
if (fromsnap != NULL) {
(void) fprintf(fout, "incremental\t%s\t%s",
fromsnap, tosnap);
} else {
(void) fprintf(fout, "full\t%s",
tosnap);
}
} else {
if (fromsnap != NULL) {
if (strchr(fromsnap, '@') == NULL &&
strchr(fromsnap, '#') == NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from @%s to %s"),
fromsnap, tosnap);
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from %s to %s"),
fromsnap, tosnap);
}
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full send of %s"),
tosnap);
}
}
if (parsable) {
(void) fprintf(fout, "\t%llu",
(longlong_t)size);
} else if (size != 0) {
char buf[16];
zfs_nicebytes(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
" estimated size is %s"), buf);
}
(void) fprintf(fout, "\n");
}
static int
dump_snapshot(zfs_handle_t *zhp, void *arg)
{
send_dump_data_t *sdd = arg;
progress_arg_t pa = { 0 };
pthread_t tid;
char *thissnap;
enum lzc_send_flags flags = 0;
int err;
boolean_t isfromsnap, istosnap, fromorigin;
boolean_t exclude = B_FALSE;
FILE *fout = sdd->std_out ? stdout : stderr;
err = 0;
thissnap = strchr(zhp->zfs_name, '@') + 1;
isfromsnap = (sdd->fromsnap != NULL &&
strcmp(sdd->fromsnap, thissnap) == 0);
if (!sdd->seenfrom && isfromsnap) {
gather_holds(zhp, sdd);
sdd->seenfrom = B_TRUE;
(void) strlcpy(sdd->prevsnap, thissnap,
sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (0);
}
if (sdd->seento || !sdd->seenfrom) {
zfs_close(zhp);
return (0);
}
istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
if (istosnap)
sdd->seento = B_TRUE;
if (sdd->large_block)
flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (sdd->embed_data)
flags |= LZC_SEND_FLAG_EMBED_DATA;
if (sdd->compress)
flags |= LZC_SEND_FLAG_COMPRESS;
if (sdd->raw)
flags |= LZC_SEND_FLAG_RAW;
if (!sdd->doall && !isfromsnap && !istosnap) {
if (sdd->replicate) {
char *snapname;
nvlist_t *snapprops;
/*
* Filter out all intermediate snapshots except origin
* snapshots needed to replicate clones.
*/
nvlist_t *nvfs = fsavl_find(sdd->fsavl,
zhp->zfs_dmustats.dds_guid, &snapname);
snapprops = fnvlist_lookup_nvlist(nvfs, "snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops, thissnap);
exclude = !nvlist_exists(snapprops, "is_clone_origin");
} else {
exclude = B_TRUE;
}
}
/*
* If a filter function exists, call it to determine whether
* this snapshot will be sent.
*/
if (exclude || (sdd->filter_cb != NULL &&
sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
/*
* This snapshot is filtered out. Don't send it, and don't
* set prevsnap_obj, so it will be as if this snapshot didn't
* exist, and the next accepted snapshot will be sent as
* an incremental from the last accepted one, or as the
* first (and full) snapshot in the case of a replication,
* non-incremental send.
*/
zfs_close(zhp);
return (0);
}
gather_holds(zhp, sdd);
fromorigin = sdd->prevsnap[0] == '\0' &&
(sdd->fromorigin || sdd->replicate);
if (sdd->verbosity != 0) {
uint64_t size = 0;
char fromds[ZFS_MAX_DATASET_NAME_LEN];
if (sdd->prevsnap[0] != '\0') {
(void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds));
*(strchr(fromds, '@') + 1) = '\0';
(void) strlcat(fromds, sdd->prevsnap, sizeof (fromds));
}
if (zfs_send_space(zhp, zhp->zfs_name,
sdd->prevsnap[0] ? fromds : NULL, flags, &size) != 0) {
size = 0; /* cannot estimate send space */
} else {
send_print_verbose(fout, zhp->zfs_name,
sdd->prevsnap[0] ? sdd->prevsnap : NULL,
size, sdd->parsable);
}
sdd->size += size;
}
if (!sdd->dryrun) {
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (sdd->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = sdd->outfd;
pa.pa_parsable = sdd->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = sdd->verbosity;
if ((err = pthread_create(&tid, NULL,
send_progress_thread, &pa)) != 0) {
zfs_close(zhp);
return (err);
}
}
err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
fromorigin, sdd->outfd, flags, sdd->debugnv);
if (sdd->progress) {
void *status = NULL;
(void) pthread_cancel(tid);
(void) pthread_join(tid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"progress thread exited nonzero"));
return (zfs_standard_error(zhp->zfs_hdl, error,
errbuf));
}
}
}
(void) strcpy(sdd->prevsnap, thissnap);
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (err);
}
static int
dump_filesystem(zfs_handle_t *zhp, void *arg)
{
int rv = 0;
send_dump_data_t *sdd = arg;
boolean_t missingfrom = B_FALSE;
zfs_cmd_t zc = {"\0"};
uint64_t min_txg = 0, max_txg = 0;
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->tosnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
sdd->err = B_TRUE;
return (0);
}
if (sdd->replicate && sdd->fromsnap) {
/*
* If this fs does not have fromsnap, and we're doing
* recursive, we need to send a full stream from the
* beginning (or an incremental from the origin if this
* is a clone). If we're doing non-recursive, then let
* them get the error.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->fromsnap);
if (zfs_ioctl(zhp->zfs_hdl,
ZFS_IOC_OBJSET_STATS, &zc) != 0) {
missingfrom = B_TRUE;
}
}
sdd->seenfrom = sdd->seento = sdd->prevsnap[0] = 0;
sdd->prevsnap_obj = 0;
if (sdd->fromsnap == NULL || missingfrom)
sdd->seenfrom = B_TRUE;
/*
* Iterate through all snapshots and process the ones we will be
* sending. If we only have a "from" and "to" snapshot to deal
* with, we can avoid iterating through all the other snapshots.
*/
if (sdd->doall || sdd->replicate || sdd->tosnap == NULL) {
if (!sdd->replicate && sdd->fromsnap != NULL)
min_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name,
sdd->fromsnap);
if (!sdd->replicate && sdd->tosnap != NULL)
max_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name,
sdd->tosnap);
rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
if (!sdd->seenfrom) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->fromsnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = -1;
}
if (rv == 0) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->tosnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = -1;
}
}
if (!sdd->seenfrom) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) does not exist\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
sdd->err = B_TRUE;
} else if (!sdd->seento) {
if (sdd->fromsnap) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) "
"is not earlier than it\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: "
"could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
}
sdd->err = B_TRUE;
}
return (rv);
}
static int
dump_filesystems(zfs_handle_t *rzhp, void *arg)
{
send_dump_data_t *sdd = arg;
nvpair_t *fspair;
boolean_t needagain, progress;
if (!sdd->replicate)
return (dump_filesystem(rzhp, sdd));
/* Mark the clone origin snapshots. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *nvfs;
uint64_t origin_guid = 0;
nvfs = fnvpair_value_nvlist(fspair);
(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
if (origin_guid != 0) {
char *snapname;
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, &snapname);
if (origin_nv != NULL) {
nvlist_t *snapprops;
snapprops = fnvlist_lookup_nvlist(origin_nv,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
snapname);
fnvlist_add_boolean(snapprops,
"is_clone_origin");
}
}
}
again:
needagain = progress = B_FALSE;
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist, *parent_nv;
char *fsname;
zfs_handle_t *zhp;
int err;
uint64_t origin_guid = 0;
uint64_t parent_guid = 0;
fslist = fnvpair_value_nvlist(fspair);
if (nvlist_lookup_boolean(fslist, "sent") == 0)
continue;
fsname = fnvlist_lookup_string(fslist, "name");
(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
&parent_guid);
if (parent_guid != 0) {
parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
if (!nvlist_exists(parent_nv, "sent")) {
/* parent has not been sent; skip this one */
needagain = B_TRUE;
continue;
}
}
if (origin_guid != 0) {
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, NULL);
if (origin_nv != NULL &&
!nvlist_exists(origin_nv, "sent")) {
/*
* origin has not been sent yet;
* skip this clone.
*/
needagain = B_TRUE;
continue;
}
}
zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
err = dump_filesystem(zhp, sdd);
fnvlist_add_boolean(fslist, "sent");
progress = B_TRUE;
zfs_close(zhp);
if (err)
return (err);
}
if (needagain) {
assert(progress);
goto again;
}
/* clean out the sent flags in case we reuse this fss */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist;
fslist = fnvpair_value_nvlist(fspair);
(void) nvlist_remove_all(fslist, "sent");
}
return (0);
}
nvlist_t *
zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token)
{
unsigned int version;
int nread, i;
unsigned long long checksum, packed_len;
/*
* Decode token header, which is:
* <token version>-<checksum of payload>-<uncompressed payload length>
* Note that the only supported token version is 1.
*/
nread = sscanf(token, "%u-%llx-%llx-",
&version, &checksum, &packed_len);
if (nread != 3) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid format)"));
return (NULL);
}
if (version != ZFS_SEND_RESUME_TOKEN_VERSION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid version %u)"),
version);
return (NULL);
}
/* convert hexadecimal representation to binary */
token = strrchr(token, '-') + 1;
int len = strlen(token) / 2;
unsigned char *compressed = zfs_alloc(hdl, len);
for (i = 0; i < len; i++) {
nread = sscanf(token + i * 2, "%2hhx", compressed + i);
if (nread != 1) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt "
"(payload is not hex-encoded)"));
return (NULL);
}
}
/* verify checksum */
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, len, &cksum);
if (cksum.zc_word[0] != checksum) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (incorrect checksum)"));
return (NULL);
}
/* uncompress */
void *packed = zfs_alloc(hdl, packed_len);
uLongf packed_len_long = packed_len;
if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK ||
packed_len_long != packed_len) {
free(packed);
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (decompression failed)"));
return (NULL);
}
/* unpack nvlist */
nvlist_t *nv;
int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP);
free(packed);
free(compressed);
if (error != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (nvlist_unpack failed)"));
return (NULL);
}
return (nv);
}
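/*
 * Usage sketch: decode a resume token (for example, the value of the
 * receive_resume_token property) and dump the embedded nvlist for
 * inspection.
 */
static void
example_dump_resume_token(libzfs_handle_t *hdl, const char *token)
{
	nvlist_t *nv = zfs_send_resume_token_to_nvlist(hdl, token);
	if (nv != NULL) {
		nvlist_print(stdout, nv);
		fnvlist_free(nv);
	}
}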
static enum lzc_send_flags
lzc_flags_from_sendflags(const sendflags_t *flags)
{
enum lzc_send_flags lzc_flags = 0;
if (flags->largeblock)
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data)
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress)
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw)
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved)
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
estimate_size(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
uint64_t resumeobj, uint64_t resumeoff, uint64_t bytes,
const char *redactbook, char *errbuf)
{
uint64_t size;
FILE *fout = flags->dryrun ? stdout : stderr;
progress_arg_t pa = { 0 };
int err = 0;
pthread_t ptid;
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_TRUE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
}
err = lzc_send_space_resume_redacted(zhp->zfs_name, from,
lzc_flags_from_sendflags(flags), resumeobj, resumeoff, bytes,
redactbook, fd, &size);
if (flags->progress) {
void *status = NULL;
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "progress thread exited "
"nonzero"));
return (zfs_standard_error(zhp->zfs_hdl, error,
errbuf));
}
}
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
send_print_verbose(fout, zhp->zfs_name, from, size,
flags->parsable);
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n", (longlong_t)size);
} else {
char buf[16];
zfs_nicenum(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
return (0);
}
static boolean_t
redact_snaps_contains(const uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
redact_snaps_equal(const uint64_t *snaps1, uint64_t num_snaps1,
const uint64_t *snaps2, uint64_t num_snaps2)
{
if (num_snaps1 != num_snaps2)
return (B_FALSE);
for (int i = 0; i < num_snaps1; i++) {
if (!redact_snaps_contains(snaps2, num_snaps2, snaps1[i]))
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check that the list of redaction snapshots in the bookmark matches the send
* we're resuming, and return whether or not it's complete.
*
* Note that the caller needs to free the contents of *bookname with free() if
* this function returns successfully.
*/
static int
find_redact_book(libzfs_handle_t *hdl, const char *path,
const uint64_t *redact_snap_guids, int num_redact_snaps,
char **bookname)
{
char errbuf[1024];
int error = 0;
nvlist_t *props = fnvlist_alloc();
nvlist_t *bmarks;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
fnvlist_add_boolean(props, "redact_complete");
fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
error = lzc_get_bookmarks(path, props, &bmarks);
fnvlist_free(props);
if (error != 0) {
if (error == ESRCH) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"nonexistent redaction bookmark provided"));
} else if (error == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset to be sent no longer exists"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unknown error: %s"), strerror(error));
}
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
nvpair_t *pair;
for (pair = nvlist_next_nvpair(bmarks, NULL); pair;
pair = nvlist_next_nvpair(bmarks, pair)) {
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
uint_t len = 0;
uint64_t *bmarksnaps = fnvlist_lookup_uint64_array(vallist,
ZPROP_VALUE, &len);
if (redact_snaps_equal(redact_snap_guids,
num_redact_snaps, bmarksnaps, len)) {
break;
}
}
if (pair == NULL) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no appropriate redaction bookmark exists"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
char *name = nvpair_name(pair);
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark, "redact_complete");
boolean_t complete = fnvlist_lookup_boolean_value(vallist,
ZPROP_VALUE);
if (!complete) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incomplete redaction bookmark provided"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
*bookname = strndup(name, ZFS_MAX_DATASET_NAME_LEN);
ASSERT3P(*bookname, !=, NULL);
fnvlist_free(bmarks);
return (0);
}
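/*
 * Sketch of the calling convention described above: on success the bookmark
 * name is returned in *bookname and must be freed by the caller.  The
 * dataset name and GUID below are placeholders.
 */
static void
example_find_redact_book(libzfs_handle_t *hdl)
{
	uint64_t guids[] = { 0x1234ULL };
	char *bookname = NULL;
	if (find_redact_book(hdl, "pool/fs", guids, 1, &bookname) == 0) {
		/* ... use bookname with lzc_send_resume_redacted() ... */
		free(bookname);
	}
}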
static int
zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
nvlist_t *resume_nvl)
{
char errbuf[1024];
char *toname;
char *fromname = NULL;
uint64_t resumeobj, resumeoff, toguid, fromguid, bytes;
zfs_handle_t *zhp;
int error = 0;
char name[ZFS_MAX_DATASET_NAME_LEN];
enum lzc_send_flags lzc_flags = 0;
FILE *fout = (flags->verbosity > 0 && flags->dryrun) ? stdout : stderr;
uint64_t *redact_snap_guids = NULL;
int num_redact_snaps = 0;
char *redact_book = NULL;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
if (flags->verbosity != 0) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"resume token contents:\n"));
nvlist_print(fout, resume_nvl);
}
if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 ||
nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt"));
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
fromguid = 0;
(void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid);
if (flags->largeblock || nvlist_exists(resume_nvl, "largeblockok"))
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data || nvlist_exists(resume_nvl, "embedok"))
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress || nvlist_exists(resume_nvl, "compressok"))
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw || nvlist_exists(resume_nvl, "rawok"))
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved || nvlist_exists(resume_nvl, "savedok"))
lzc_flags |= LZC_SEND_FLAG_SAVED;
if (flags->saved) {
(void) strcpy(name, toname);
} else {
error = guid_to_name(hdl, toname, toguid, B_FALSE, name);
if (error != 0) {
if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is no longer the same snapshot "
"used in the initial send"), toname);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' used in the initial send no "
"longer exists"), toname);
}
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
}
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unable to access '%s'"), name);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
if (nvlist_lookup_uint64_array(resume_nvl, "book_redact_snaps",
&redact_snap_guids, (uint_t *)&num_redact_snaps) != 0) {
num_redact_snaps = -1;
}
if (fromguid != 0) {
if (guid_to_name_redact_snaps(hdl, toname, fromguid, B_TRUE,
redact_snap_guids, num_redact_snaps, name) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source %#llx no longer exists"),
(longlong_t)fromguid);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
fromname = name;
}
redact_snap_guids = NULL;
if (nvlist_lookup_uint64_array(resume_nvl,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &redact_snap_guids,
(uint_t *)&num_redact_snaps) == 0) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, toname, sizeof (path));
char *at = strchr(path, '@');
ASSERT3P(at, !=, NULL);
*at = '\0';
if ((error = find_redact_book(hdl, path, redact_snap_guids,
num_redact_snaps, &redact_book)) != 0) {
return (error);
}
}
if (flags->verbosity != 0) {
/*
* Some of these may have come from the resume token, set them
* here for size estimate purposes.
*/
sendflags_t tmpflags = *flags;
if (lzc_flags & LZC_SEND_FLAG_LARGE_BLOCK)
tmpflags.largeblock = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_COMPRESS)
tmpflags.compress = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
tmpflags.embed_data = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_RAW)
tmpflags.raw = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_SAVED)
tmpflags.saved = B_TRUE;
error = estimate_size(zhp, fromname, outfd, &tmpflags,
resumeobj, resumeoff, bytes, redact_book, errbuf);
}
if (!flags->dryrun) {
progress_arg_t pa = { 0 };
pthread_t tid;
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = outfd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
error = pthread_create(&tid, NULL,
send_progress_thread, &pa);
if (error != 0) {
if (redact_book != NULL)
free(redact_book);
zfs_close(zhp);
return (error);
}
}
error = lzc_send_resume_redacted(zhp->zfs_name, fromname, outfd,
lzc_flags, resumeobj, resumeoff, redact_book);
if (redact_book != NULL)
free(redact_book);
if (flags->progress) {
void *status = NULL;
(void) pthread_cancel(tid);
(void) pthread_join(tid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"progress thread exited nonzero"));
return (zfs_standard_error(hdl, error, errbuf));
}
}
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
zfs_close(zhp);
switch (error) {
case 0:
return (0);
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ESRCH:
if (lzc_exists(zhp->zfs_name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source could not be found"));
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EXDEV:
case ENOENT:
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
} else {
if (redact_book != NULL)
free(redact_book);
}
zfs_close(zhp);
return (error);
}
int
zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
char errbuf[1024];
nvlist_t *resume_nvl;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token);
if (resume_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
ret = zfs_send_resume_impl(hdl, flags, outfd, resume_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
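/*
 * Caller sketch for zfs_send_resume(): fetch the receive_resume_token
 * property from a partially received dataset (here "zhp" is a placeholder
 * handle) and restart the send into outfd.
 */
static int
example_zfs_send_resume(libzfs_handle_t *hdl, zfs_handle_t *zhp, int outfd)
{
	char token[ZFS_MAXPROPLEN];
	sendflags_t flags = { 0 };
	if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, token,
	    sizeof (token), NULL, NULL, 0, B_TRUE) != 0)
		return (-1);
	return (zfs_send_resume(hdl, &flags, outfd, token));
}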
int
zfs_send_saved(zfs_handle_t *zhp, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *saved_nvl = NULL, *resume_nvl = NULL;
uint64_t saved_guid = 0, resume_guid = 0;
uint64_t obj = 0, off = 0, bytes = 0;
char token_buf[ZFS_MAXPROPLEN];
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"saved send failed"));
ret = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE);
if (ret != 0)
goto out;
saved_nvl = zfs_send_resume_token_to_nvlist(hdl, token_buf);
if (saved_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
/*
* If a resume token is provided we use the object and offset
* from that instead of the default, which starts from the
* beginning.
*/
if (resume_token != NULL) {
resume_nvl = zfs_send_resume_token_to_nvlist(hdl,
resume_token);
if (resume_nvl == NULL) {
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(resume_nvl, "object", &obj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &off) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid",
&resume_guid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(saved_nvl, "toguid",
&saved_guid)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset's resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (resume_guid != saved_guid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token does not match dataset"));
ret = zfs_error(hdl, EZFS_BADBACKUP, errbuf);
goto out;
}
}
(void) nvlist_remove_all(saved_nvl, "object");
fnvlist_add_uint64(saved_nvl, "object", obj);
(void) nvlist_remove_all(saved_nvl, "offset");
fnvlist_add_uint64(saved_nvl, "offset", off);
(void) nvlist_remove_all(saved_nvl, "bytes");
fnvlist_add_uint64(saved_nvl, "bytes", bytes);
(void) nvlist_remove_all(saved_nvl, "toname");
fnvlist_add_string(saved_nvl, "toname", zhp->zfs_name);
ret = zfs_send_resume_impl(hdl, flags, outfd, saved_nvl);
out:
fnvlist_free(saved_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
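/*
 * Caller sketch for zfs_send_saved(): resend a partially received ("saved")
 * dataset from the beginning.  Passing a NULL resume token keeps the default
 * object/offset of zero as described above.
 */
static int
example_zfs_send_saved(zfs_handle_t *zhp, int outfd)
{
	sendflags_t flags = { 0 };
	flags.saved = B_TRUE;
	return (zfs_send_saved(zhp, &flags, outfd, NULL));
}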
/*
* This function informs the target system that the recursive send is complete.
* The record is also expected in the case of a send -p.
*/
static int
send_conclusion_record(int fd, zio_cksum_t *zc)
{
dmu_replay_record_t drr = { 0 };
drr.drr_type = DRR_END;
if (zc != NULL)
drr.drr_u.drr_end.drr_checksum = *zc;
if (write(fd, &drr, sizeof (drr)) == -1) {
return (errno);
}
return (0);
}
/*
* This function is responsible for sending the records that contain the
* necessary information for the target system's libzfs to be able to set the
* properties of the filesystem being received, or to be able to prepare for
* a recursive receive.
*
* The "zhp" argument is the handle of the snapshot we are sending
* (the "tosnap"). The "from" argument is the short snapshot name (the part
* after the @) of the incremental source.
*/
static int
send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
boolean_t gather_props, boolean_t recursive, boolean_t verbose,
boolean_t dryrun, boolean_t raw, boolean_t replicate, boolean_t skipmissing,
boolean_t backup, boolean_t holds, boolean_t props, boolean_t doall,
nvlist_t **fssp, avl_tree_t **fsavlp)
{
int err = 0;
char *packbuf = NULL;
size_t buflen = 0;
zio_cksum_t zc = { {0} };
int featureflags = 0;
/* name of filesystem/volume that contains snapshot we are sending */
char tofs[ZFS_MAX_DATASET_NAME_LEN];
/* short name of snap we are sending */
char *tosnap = "";
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && zfs_prop_get_int(zhp,
ZFS_PROP_VERSION) >= ZPL_VERSION_SA) {
featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
if (holds)
featureflags |= DMU_BACKUP_FEATURE_HOLDS;
(void) strlcpy(tofs, zhp->zfs_name, ZFS_MAX_DATASET_NAME_LEN);
char *at = strchr(tofs, '@');
if (at != NULL) {
*at = '\0';
tosnap = at + 1;
}
if (gather_props) {
nvlist_t *hdrnv = fnvlist_alloc();
nvlist_t *fss = NULL;
if (from != NULL)
fnvlist_add_string(hdrnv, "fromsnap", from);
fnvlist_add_string(hdrnv, "tosnap", tosnap);
if (!recursive)
fnvlist_add_boolean(hdrnv, "not_recursive");
if (raw) {
fnvlist_add_boolean(hdrnv, "raw");
}
if ((err = gather_nvlist(zhp->zfs_hdl, tofs,
from, tosnap, recursive, raw, doall, replicate, skipmissing,
verbose, backup, holds, props, &fss, fsavlp)) != 0) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
/*
* Do not allow the size of the properties list to exceed
* the limit
*/
if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
"the size of the list of snapshots and properties "
"is too large to be received successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name);
return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
errbuf));
}
fnvlist_add_nvlist(hdrnv, "fss", fss);
VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
0));
if (fssp != NULL) {
*fssp = fss;
} else {
fnvlist_free(fss);
}
fnvlist_free(hdrnv);
}
if (!dryrun) {
dmu_replay_record_t drr = { 0 };
/* write first begin record */
drr.drr_type = DRR_BEGIN;
drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
drr_versioninfo, DMU_COMPOUNDSTREAM);
DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
drr_versioninfo, featureflags);
if (snprintf(drr.drr_u.drr_begin.drr_toname,
sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", tofs,
tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
drr.drr_payloadlen = buflen;
err = dump_record(&drr, packbuf, buflen, &zc, fd);
free(packbuf);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
err = send_conclusion_record(fd, &zc);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
}
return (0);
}
/*
* Generate a send stream. The "zhp" argument is the filesystem/volume
* that contains the snapshot to send. The "fromsnap" argument is the
* short name (the part after the '@') of the snapshot that is the
* incremental source to send from (if non-NULL). The "tosnap" argument
* is the short name of the snapshot to send.
*
* The content of the send stream is the snapshot identified by
* 'tosnap'. Incremental streams are requested in two ways:
* - from the snapshot identified by "fromsnap" (if non-null) or
* - from the origin of the dataset identified by zhp, which must
* be a clone. In this case, "fromsnap" is null and "fromorigin"
* is TRUE.
*
* The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
* uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
* if "replicate" is set. If "doall" is set, dump all the intermediate
* snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
* case too. If "props" is set, send properties.
*/
int
zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
char errbuf[1024];
send_dump_data_t sdd = { 0 };
int err = 0;
nvlist_t *fss = NULL;
avl_tree_t *fsavl = NULL;
static uint64_t holdseq;
int spa_version;
int featureflags = 0;
FILE *fout;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot send '%s'"), zhp->zfs_name);
if (fromsnap && fromsnap[0] == '\0') {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"zero-length incremental source"));
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
}
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM) {
uint64_t version;
version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (version >= ZPL_VERSION_SA) {
featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
}
if (flags->holds)
featureflags |= DMU_BACKUP_FEATURE_HOLDS;
if (flags->replicate || flags->doall || flags->props ||
flags->holds || flags->backup) {
char full_tosnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_tosnap_name, sizeof (full_tosnap_name),
"%s@%s", zhp->zfs_name, tosnap) >=
sizeof (full_tosnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *tosnap = zfs_open(zhp->zfs_hdl,
full_tosnap_name, ZFS_TYPE_SNAPSHOT);
if (tosnap == NULL) {
err = -1;
goto err_out;
}
err = send_prelim_records(tosnap, fromsnap, outfd,
flags->replicate || flags->props || flags->holds,
flags->replicate, flags->verbosity > 0, flags->dryrun,
flags->raw, flags->replicate, flags->skipmissing,
flags->backup, flags->holds, flags->props, flags->doall,
&fss, &fsavl);
zfs_close(tosnap);
if (err != 0)
goto err_out;
}
/* dump each stream */
sdd.fromsnap = fromsnap;
sdd.tosnap = tosnap;
sdd.outfd = outfd;
sdd.replicate = flags->replicate;
sdd.doall = flags->doall;
sdd.fromorigin = flags->fromorigin;
sdd.fss = fss;
sdd.fsavl = fsavl;
sdd.verbosity = flags->verbosity;
sdd.parsable = flags->parsable;
sdd.progress = flags->progress;
sdd.dryrun = flags->dryrun;
sdd.large_block = flags->largeblock;
sdd.embed_data = flags->embed_data;
sdd.compress = flags->compress;
sdd.raw = flags->raw;
sdd.holds = flags->holds;
sdd.filter_cb = filter_func;
sdd.filter_cb_arg = cb_arg;
if (debugnvp)
sdd.debugnv = *debugnvp;
if (sdd.verbosity != 0 && sdd.dryrun)
sdd.std_out = B_TRUE;
fout = sdd.std_out ? stdout : stderr;
/*
* Some flags require that we place user holds on the datasets that are
* being sent so they don't get destroyed during the send. We can skip
* this step if the pool is imported read-only since the datasets cannot
* be destroyed.
*/
if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
ZPOOL_PROP_READONLY, NULL) &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS &&
(flags->doall || flags->replicate)) {
++holdseq;
(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
sdd.cleanup_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (sdd.cleanup_fd < 0) {
err = errno;
goto stderr_out;
}
sdd.snapholds = fnvlist_alloc();
} else {
sdd.cleanup_fd = -1;
sdd.snapholds = NULL;
}
if (flags->verbosity != 0 || sdd.snapholds != NULL) {
/*
* Do a verbose no-op dry run to get all the verbose output
* or to gather snapshot holds before generating any data,
* then do a non-verbose real run to generate the streams.
*/
sdd.dryrun = B_TRUE;
err = dump_filesystems(zhp, &sdd);
if (err != 0)
goto stderr_out;
if (flags->verbosity != 0) {
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n",
(longlong_t)sdd.size);
} else {
char buf[16];
zfs_nicebytes(sdd.size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
}
/* Ensure no snaps found is treated as an error. */
if (!sdd.seento) {
err = ENOENT;
goto err_out;
}
/* Skip the second run if dryrun was requested. */
if (flags->dryrun)
goto err_out;
if (sdd.snapholds != NULL) {
err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds);
if (err != 0)
goto stderr_out;
fnvlist_free(sdd.snapholds);
sdd.snapholds = NULL;
}
sdd.dryrun = B_FALSE;
sdd.verbosity = 0;
}
err = dump_filesystems(zhp, &sdd);
fsavl_destroy(fsavl);
fnvlist_free(fss);
/* Ensure no snaps found is treated as an error. */
if (err == 0 && !sdd.seento)
err = ENOENT;
if (sdd.cleanup_fd != -1) {
VERIFY(0 == close(sdd.cleanup_fd));
sdd.cleanup_fd = -1;
}
if (!flags->dryrun && (flags->replicate || flags->doall ||
flags->props || flags->backup || flags->holds)) {
/*
* write the final end record. NB: we want to do this even if
* there was some error, because the stream might not have
* totally failed.
*/
err = send_conclusion_record(outfd, NULL);
if (err != 0)
return (zfs_standard_error(zhp->zfs_hdl, err, errbuf));
}
return (err || sdd.err);
stderr_out:
err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
err_out:
fsavl_destroy(fsavl);
fnvlist_free(fss);
fnvlist_free(sdd.snapholds);
if (sdd.cleanup_fd != -1)
VERIFY(0 == close(sdd.cleanup_fd));
return (err);
}
static zfs_handle_t *
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
{
char dirname[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(dirname, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(dirname, '@');
if (c != NULL)
*c = '\0';
return (zfs_open(hdl, dirname, ZFS_TYPE_DATASET));
}
/*
* Returns B_TRUE if earlier is an earlier snapshot in later's timeline; either
* an earlier snapshot in the same filesystem, or a snapshot before later's
* origin, or its origin's origin, etc.
*/
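/*
* Worked example (editor's addition; dataset names and txgs are
* hypothetical): with tank/fs@a created at txg 100, tank/clone a clone of
* tank/fs@a, and tank/clone@b created at txg 200, snapshot_is_before(@a, @b)
* is B_TRUE because @a appears in @b's origin chain, while
* snapshot_is_before(@b, @a) is B_FALSE because @b's txg is not earlier.
*/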
static boolean_t
snapshot_is_before(zfs_handle_t *earlier, zfs_handle_t *later)
{
boolean_t ret;
uint64_t later_txg =
(later->zfs_type == ZFS_TYPE_FILESYSTEM ||
later->zfs_type == ZFS_TYPE_VOLUME ?
UINT64_MAX : zfs_prop_get_int(later, ZFS_PROP_CREATETXG));
uint64_t earlier_txg = zfs_prop_get_int(earlier, ZFS_PROP_CREATETXG);
if (earlier_txg >= later_txg)
return (B_FALSE);
zfs_handle_t *earlier_dir = name_to_dir_handle(earlier->zfs_hdl,
earlier->zfs_name);
zfs_handle_t *later_dir = name_to_dir_handle(later->zfs_hdl,
later->zfs_name);
if (strcmp(earlier_dir->zfs_name, later_dir->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_TRUE);
}
char clonename[ZFS_MAX_DATASET_NAME_LEN];
if (zfs_prop_get(later_dir, ZFS_PROP_ORIGIN, clonename,
ZFS_MAX_DATASET_NAME_LEN, NULL, NULL, 0, B_TRUE) != 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_FALSE);
}
zfs_handle_t *origin = zfs_open(earlier->zfs_hdl, clonename,
ZFS_TYPE_DATASET);
uint64_t origin_txg = zfs_prop_get_int(origin, ZFS_PROP_CREATETXG);
/*
* If "earlier" is exactly the origin, then
* snapshot_is_before(earlier, origin) will return false (because
* they're the same).
*/
if (origin_txg == earlier_txg &&
strcmp(origin->zfs_name, earlier->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
zfs_close(origin);
return (B_TRUE);
}
zfs_close(earlier_dir);
zfs_close(later_dir);
ret = snapshot_is_before(earlier, origin);
zfs_close(origin);
return (ret);
}
/*
* The "zhp" argument is the handle of the dataset to send (typically a
* snapshot). The "from" argument is the full name of the snapshot or
* bookmark that is the incremental source.
*/
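/*
* Illustrative sketch (editor's addition, not upstream code): "hdl" and
* "outfd" are assumed to be an open libzfs handle and an open output file
* descriptor; the dataset names are hypothetical. This mirrors a plain
* incremental send of a single snapshot ("zfs send -i").
*
*     sendflags_t flags = { 0 };
*     zfs_handle_t *zhp = zfs_open(hdl, "tank/data@friday",
*         ZFS_TYPE_SNAPSHOT);
*     if (zhp != NULL) {
*         int err = zfs_send_one(zhp, "tank/data@monday", outfd,
*             &flags, NULL);   // NULL: no redaction bookmark
*         zfs_close(zhp);
*     }
*/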
int
zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
const char *redactbook)
{
int err;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *name = zhp->zfs_name;
pthread_t ptid;
progress_arg_t pa = { 0 };
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), name);
if (from != NULL && strchr(from, '@')) {
zfs_handle_t *from_zhp = zfs_open(hdl, from,
ZFS_TYPE_DATASET);
if (from_zhp == NULL)
return (-1);
if (!snapshot_is_before(from_zhp, zhp)) {
zfs_close(from_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
zfs_close(from_zhp);
}
if (redactbook != NULL) {
char bookname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *redact_snaps;
zfs_handle_t *book_zhp;
char *at, *pound;
int dsnamelen;
pound = strchr(redactbook, '#');
if (pound != NULL)
redactbook = pound + 1;
at = strchr(name, '@');
if (at == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot do a redacted send to a filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
dsnamelen = at - name;
if (snprintf(bookname, sizeof (bookname), "%.*s#%s",
dsnamelen, name, redactbook)
>= sizeof (bookname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid bookmark name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
book_zhp = zfs_open(hdl, bookname, ZFS_TYPE_BOOKMARK);
if (book_zhp == NULL)
return (-1);
if (nvlist_lookup_nvlist(book_zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
&redact_snaps) != 0 || redact_snaps == NULL) {
zfs_close(book_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a redaction bookmark"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
zfs_close(book_zhp);
}
/*
* Send fs properties
*/
if (flags->props || flags->holds || flags->backup) {
/*
* Note: the header generated by send_prelim_records()
* assumes that the incremental source is in the same
* filesystem/volume as the target (which is a requirement
* when doing "zfs send -R"). But that isn't always the
* case here (e.g. send from snap in origin, or send from
* bookmark). We pass from=NULL, which will omit this
* information from the prelim records; it isn't used
* when receiving this type of stream.
*/
err = send_prelim_records(zhp, NULL, fd, B_TRUE, B_FALSE,
flags->verbosity > 0, flags->dryrun, flags->raw,
flags->replicate, B_FALSE, flags->backup, flags->holds,
flags->props, flags->doall, NULL, NULL);
if (err != 0)
return (err);
}
/*
* Perform size estimate if verbose was specified.
*/
if (flags->verbosity != 0) {
err = estimate_size(zhp, from, fd, flags, 0, 0, 0, redactbook,
errbuf);
if (err != 0)
return (err);
}
if (flags->dryrun)
return (0);
/*
* If progress reporting is requested, spawn a new thread to poll
* ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
}
err = lzc_send_redacted(name, from, fd,
lzc_flags_from_sendflags(flags), redactbook);
if (flags->progress) {
void *status = NULL;
if (err != 0)
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED)
return (zfs_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN,
"progress thread exited nonzero")));
}
- if (flags->props || flags->holds || flags->backup) {
+ if (err == 0 && (flags->props || flags->holds || flags->backup)) {
/* Write the final end record. */
err = send_conclusion_record(fd, NULL);
if (err != 0)
return (zfs_standard_error(hdl, err, errbuf));
}
if (err != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
case ESRCH:
if (lzc_exists(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
from);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"target is busy; if a filesystem, "
"it must not be mounted"));
return (zfs_error(hdl, EZFS_BUSY, errbuf));
case EDQUOT:
case EFAULT:
case EFBIG:
case EINVAL:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (err != 0);
}
/*
* Routines specific to "zfs recv"
*/
static int
recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
boolean_t byteswap, zio_cksum_t *zc)
{
char *cp = buf;
int rv;
int len = ilen;
do {
rv = read(fd, cp, len);
cp += rv;
len -= rv;
} while (rv > 0);
if (rv < 0 || len != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to read from stream"));
return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
"cannot receive")));
}
if (zc) {
if (byteswap)
fletcher_4_incremental_byteswap(buf, ilen, zc);
else
fletcher_4_incremental_native(buf, ilen, zc);
}
return (0);
}
static int
recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
boolean_t byteswap, zio_cksum_t *zc)
{
char *buf;
int err;
buf = zfs_alloc(hdl, len);
if (buf == NULL)
return (ENOMEM);
if (len > hdl->libzfs_max_nvlist) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
free(buf);
return (ENOMEM);
}
err = recv_read(hdl, fd, buf, len, byteswap, zc);
if (err != 0) {
free(buf);
return (err);
}
err = nvlist_unpack(buf, len, nvp, 0);
free(buf);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (malformed nvlist)"));
return (EINVAL);
}
return (0);
}
/*
* Returns the grand origin (origin of origin of origin...) of a given handle.
* If this dataset is not a clone, it simply returns a copy of the original
* handle.
*/
static zfs_handle_t *
recv_open_grand_origin(zfs_handle_t *zhp)
{
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
zfs_handle_t *ozhp = zfs_handle_dup(zhp);
while (ozhp != NULL) {
if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin,
sizeof (origin), &src, NULL, 0, B_FALSE) != 0)
break;
(void) zfs_close(ozhp);
ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM);
}
return (ozhp);
}
static int
recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname)
{
int err;
zfs_handle_t *ozhp = NULL;
/*
* Attempt to rename the dataset. If it fails with EACCES we have
* attempted to rename the dataset outside of its encryption root.
* Force the dataset to become an encryption root and try again.
*/
err = lzc_rename(name, newname);
if (err == EACCES) {
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = ENOENT;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = lzc_rename(name, newname);
}
out:
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
int baselen, char *newname, recvflags_t *flags)
{
static int seq;
int err;
prop_changelist_t *clp = NULL;
zfs_handle_t *zhp = NULL;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
if (clp == NULL) {
err = -1;
goto out;
}
err = changelist_prefix(clp);
if (err)
goto out;
if (tryname) {
(void) strcpy(newname, tryname);
if (flags->verbose) {
(void) printf("attempting rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, tryname);
} else {
err = ENOENT;
}
if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) {
seq++;
(void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN,
"%.*srecv-%u-%u", baselen, name, getpid(), seq);
if (flags->verbose) {
(void) printf("failed - trying rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, newname);
if (err && flags->verbose) {
(void) printf("failed (%u) - "
"will try again on next pass\n", errno);
}
err = EAGAIN;
} else if (flags->verbose) {
if (err == 0)
(void) printf("success\n");
else
(void) printf("failed (%u)\n", errno);
}
(void) changelist_postfix(clp);
out:
if (clp != NULL)
changelist_free(clp);
if (zhp != NULL)
zfs_close(zhp);
return (err);
}
static int
recv_promote(libzfs_handle_t *hdl, const char *fsname,
const char *origin_fsname, recvflags_t *flags)
{
int err;
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL, *ozhp = NULL;
if (flags->verbose)
(void) printf("promoting %s\n", fsname);
(void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
/*
* Attempt to promote the dataset. If it fails with EACCES the
* promotion would cause this dataset to leave its encryption root.
* Force the origin to become an encryption root and try again.
*/
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (err == EACCES) {
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = -1;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
}
out:
if (zhp != NULL)
zfs_close(zhp);
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
static int
recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
char *newname, recvflags_t *flags)
{
int err = 0;
prop_changelist_t *clp;
zfs_handle_t *zhp;
boolean_t defer = B_FALSE;
int spa_version;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
/* Record the type before zfs_close() so zhp isn't used after free. */
boolean_t is_snap = (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT);
if (is_snap &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS)
defer = B_TRUE;
zfs_close(zhp);
if (clp == NULL)
return (-1);
err = changelist_prefix(clp);
if (err)
return (err);
if (flags->verbose)
(void) printf("attempting destroy %s\n", name);
if (is_snap) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, name);
err = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
err = lzc_destroy(name);
}
if (err == 0) {
if (flags->verbose)
(void) printf("success\n");
changelist_remove(clp, name);
}
(void) changelist_postfix(clp);
changelist_free(clp);
/*
* Deferred destroy might destroy the snapshot or only mark it to be
* destroyed later, and it returns success in either case.
*/
if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
ZFS_TYPE_SNAPSHOT))) {
err = recv_rename(hdl, name, NULL, baselen, newname, flags);
}
return (err);
}
typedef struct guid_to_name_data {
uint64_t guid;
boolean_t bookmark_ok;
char *name;
char *skip;
uint64_t *redact_snap_guids;
uint64_t num_redact_snaps;
} guid_to_name_data_t;
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
uint64_t *bmark_snaps;
uint_t bmark_num_snaps;
nvlist_t *nvl;
if (zhp->zfs_type != ZFS_TYPE_BOOKMARK)
return (B_FALSE);
nvl = fnvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
bmark_snaps = fnvlist_lookup_uint64_array(nvl, ZPROP_VALUE,
&bmark_num_snaps);
if (bmark_num_snaps != gtnd->num_redact_snaps)
return (B_FALSE);
int i = 0;
for (; i < bmark_num_snaps; i++) {
int j = 0;
for (; j < bmark_num_snaps; j++) {
if (bmark_snaps[i] == gtnd->redact_snap_guids[j])
break;
}
if (j == bmark_num_snaps)
break;
}
return (i == bmark_num_snaps);
}
static int
guid_to_name_cb(zfs_handle_t *zhp, void *arg)
{
guid_to_name_data_t *gtnd = arg;
const char *slash;
int err;
if (gtnd->skip != NULL &&
(slash = strrchr(zhp->zfs_name, '/')) != NULL &&
strcmp(slash + 1, gtnd->skip) == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid &&
(gtnd->num_redact_snaps == -1 || redact_snaps_match(zhp, gtnd))) {
(void) strcpy(gtnd->name, zhp->zfs_name);
zfs_close(zhp);
return (EEXIST);
}
err = zfs_iter_children(zhp, guid_to_name_cb, gtnd);
if (err != EEXIST && gtnd->bookmark_ok)
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, gtnd);
zfs_close(zhp);
return (err);
}
/*
* Attempt to find the local dataset associated with this guid. In the case of
* multiple matches, we attempt to find the "best" match by searching
* progressively larger portions of the hierarchy. This allows one to send a
* tree of datasets individually and guarantee that we will find the source
* guid within that hierarchy, even if there are multiple matches elsewhere.
*
* If num_redact_snaps is not -1, we attempt to find a redaction bookmark with
* the specified number of redaction snapshots. If num_redact_snaps isn't 0 or
* -1, then redact_snap_guids will be an array of the guids of the snapshots the
* redaction bookmark was created with. If num_redact_snaps is -1, then we will
* attempt to find a snapshot or bookmark (if bookmark_ok is passed) with the
* given guid. Note that a redaction bookmark can be returned if
* num_redact_snaps == -1.
*/
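/*
* Illustrative sketch (editor's addition; the dataset name and the guid
* source are hypothetical): resolve a snapshot guid to its "most local"
* name relative to tank/backup/data, allowing bookmark matches. On
* success, "name" holds the resolved path, e.g. "tank/backup/data@monday".
*
*     char name[ZFS_MAX_DATASET_NAME_LEN];
*     if (guid_to_name(hdl, "tank/backup/data", fromguid, B_TRUE,
*         name) == 0)
*         (void) printf("incremental source is %s\n", name);
*/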
static int
guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name)
{
char pname[ZFS_MAX_DATASET_NAME_LEN];
guid_to_name_data_t gtnd;
gtnd.guid = guid;
gtnd.bookmark_ok = bookmark_ok;
gtnd.name = name;
gtnd.skip = NULL;
gtnd.redact_snap_guids = redact_snap_guids;
gtnd.num_redact_snaps = num_redact_snaps;
/*
* Search progressively larger portions of the hierarchy, starting
* with the filesystem specified by 'parent'. This will
* select the "most local" version of the origin snapshot in the case
* that there are multiple matching snapshots in the system.
*/
(void) strlcpy(pname, parent, sizeof (pname));
char *cp = strrchr(pname, '@');
if (cp == NULL)
cp = strchr(pname, '\0');
for (; cp != NULL; cp = strrchr(pname, '/')) {
/* Chop off the last component and open the parent */
*cp = '\0';
zfs_handle_t *zhp = make_dataset_handle(hdl, pname);
if (zhp == NULL)
continue;
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
if (err != EEXIST)
err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
if (err != EEXIST && bookmark_ok)
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, &gtnd);
zfs_close(zhp);
if (err == EEXIST)
return (0);
/*
* Remember the last portion of the dataset so we skip it next
* time through (as we've already searched that portion of the
* hierarchy).
*/
gtnd.skip = strrchr(pname, '/') + 1;
}
return (ENOENT);
}
static int
guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
boolean_t bookmark_ok, char *name)
{
return (guid_to_name_redact_snaps(hdl, parent, guid, bookmark_ok, NULL,
-1, name));
}
/*
* Return -1 if guid1 was created before guid2, 0 if they were created in
* the same txg, and +1 if guid1 was created after guid2.
*/
static int
created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
uint64_t guid1, uint64_t guid2)
{
nvlist_t *nvfs;
char *fsname = NULL, *snapname = NULL;
char buf[ZFS_MAX_DATASET_NAME_LEN];
int rv;
zfs_handle_t *guid1hdl, *guid2hdl;
uint64_t create1, create2;
if (guid2 == 0)
return (0);
if (guid1 == 0)
return (1);
nvfs = fsavl_find(avl, guid1, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid1hdl == NULL)
return (-1);
nvfs = fsavl_find(avl, guid2, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid2hdl == NULL) {
zfs_close(guid1hdl);
return (-1);
}
create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
if (create1 < create2)
rv = -1;
else if (create1 > create2)
rv = +1;
else
rv = 0;
zfs_close(guid1hdl);
zfs_close(guid2hdl);
return (rv);
}
/*
* This function reestablishes the hierarchy of encryption roots after a
* recursive incremental receive has completed. This must be done after the
* second call to recv_incremental_replication() has renamed and promoted all
* sent datasets to their final locations in the dataset hierarchy.
*/
static int
recv_fix_encryption_hierarchy(libzfs_handle_t *hdl, const char *top_zfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl)
{
int err;
nvpair_t *fselem = NULL;
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) {
zfs_handle_t *zhp = NULL;
uint64_t crypt;
nvlist_t *snaps, *props, *stream_nvfs = NULL;
nvpair_t *snapel = NULL;
boolean_t is_encroot, is_clone, stream_encroot;
char *cp;
char *stream_keylocation = NULL;
char keylocation[MAXNAMELEN];
char fsname[ZFS_MAX_DATASET_NAME_LEN];
keylocation[0] = '\0';
stream_nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(stream_nvfs, "snaps");
props = fnvlist_lookup_nvlist(stream_nvfs, "props");
stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
/* find a snapshot from the stream that exists locally */
err = ENOENT;
while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) {
uint64_t guid;
guid = fnvpair_value_uint64(snapel);
err = guid_to_name(hdl, top_zfs, guid, B_FALSE,
fsname);
if (err == 0)
break;
}
if (err != 0)
continue;
cp = strchr(fsname, '@');
if (cp != NULL)
*cp = '\0';
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = ENOENT;
goto error;
}
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
(void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
/* we don't need to do anything for unencrypted datasets */
if (crypt == ZIO_CRYPT_OFF) {
zfs_close(zhp);
continue;
}
/*
* If the dataset is flagged as an encryption root, was not
* received as a clone and is not currently an encryption root,
* force it to become one. Fixup the keylocation if necessary.
*/
if (stream_encroot) {
if (!is_clone && !is_encroot) {
err = lzc_change_key(fsname,
DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
stream_keylocation = fnvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
/*
* Refresh the properties in case the call to
* lzc_change_key() changed the value.
*/
zfs_refresh_properties(zhp);
err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
keylocation, sizeof (keylocation), NULL, NULL,
0, B_TRUE);
if (err != 0) {
zfs_close(zhp);
goto error;
}
if (strcmp(keylocation, stream_keylocation) != 0) {
err = zfs_prop_set(zhp,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
stream_keylocation);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
}
/*
* If the dataset is not flagged as an encryption root and is
* currently an encryption root, force it to inherit from its
* parent. The root of a raw send should never be
* force-inherited.
*/
if (!stream_encroot && is_encroot &&
strcmp(top_zfs, fsname) != 0) {
err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT,
NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
zfs_close(zhp);
}
return (0);
error:
return (err);
}
static int
recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
nvlist_t *renamed)
{
nvlist_t *local_nv, *deleted = NULL;
avl_tree_t *local_avl;
nvpair_t *fselem, *nextfselem;
char *fromsnap;
char newname[ZFS_MAX_DATASET_NAME_LEN];
char guidname[32];
int error;
boolean_t needagain, progress, recursive;
char *s1, *s2;
fromsnap = fnvlist_lookup_string(stream_nv, "fromsnap");
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
if (flags->dryrun)
return (0);
again:
needagain = progress = B_FALSE;
deleted = fnvlist_alloc();
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
return (error);
/*
* Process deletes and renames
*/
for (fselem = nvlist_next_nvpair(local_nv, NULL);
fselem; fselem = nextfselem) {
nvlist_t *nvfs, *snaps;
nvlist_t *stream_nvfs = NULL;
nvpair_t *snapelem, *nextsnapelem;
uint64_t fromguid = 0;
uint64_t originguid = 0;
uint64_t stream_originguid = 0;
uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
char *fsname, *stream_fsname;
nextfselem = nvlist_next_nvpair(local_nv, fselem);
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
fsname = fnvlist_lookup_string(nvfs, "name");
parent_fromsnap_guid = fnvlist_lookup_uint64(nvfs,
"parentfromsnap");
(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
/*
* First find the stream's fs, so we can check for
* a different origin (due to "zfs promote")
*/
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
uint64_t thisguid;
thisguid = fnvpair_value_uint64(snapelem);
stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
if (stream_nvfs != NULL)
break;
}
/* check for promote */
(void) nvlist_lookup_uint64(stream_nvfs, "origin",
&stream_originguid);
if (stream_nvfs && originguid != stream_originguid) {
switch (created_before(hdl, local_avl,
stream_originguid, originguid)) {
case 1: {
/* promote it! */
nvlist_t *origin_nvfs;
char *origin_fsname;
origin_nvfs = fsavl_find(local_avl, originguid,
NULL);
origin_fsname = fnvlist_lookup_string(
origin_nvfs, "name");
error = recv_promote(hdl, fsname, origin_fsname,
flags);
if (error == 0)
progress = B_TRUE;
break;
}
default:
break;
case -1:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
return (-1);
}
/*
* We had/have the wrong origin, therefore our
* list of snapshots is wrong. Need to handle
* them on the next pass.
*/
needagain = B_TRUE;
continue;
}
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nextsnapelem) {
uint64_t thisguid;
char *stream_snapname;
nvlist_t *found, *props;
nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
thisguid = fnvpair_value_uint64(snapelem);
found = fsavl_find(stream_avl, thisguid,
&stream_snapname);
/* check for delete */
if (found == NULL) {
char name[ZFS_MAX_DATASET_NAME_LEN];
if (!flags->force)
continue;
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
error = recv_destroy(hdl, name,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)thisguid);
nvlist_add_boolean(deleted, guidname);
continue;
}
stream_nvfs = found;
if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
&props) && 0 == nvlist_lookup_nvlist(props,
stream_snapname, &props)) {
zfs_cmd_t zc = {"\0"};
zc.zc_cookie = B_TRUE; /* received */
(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
"%s@%s", fsname, nvpair_name(snapelem));
if (zcmd_write_src_nvlist(hdl, &zc,
props) == 0) {
(void) zfs_ioctl(hdl,
ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
}
/* check for different snapname */
if (strcmp(nvpair_name(snapelem),
stream_snapname) != 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
char tryname[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
(void) snprintf(tryname, sizeof (tryname), "%s@%s",
fsname, stream_snapname);
error = recv_rename(hdl, name, tryname,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
if (strcmp(stream_snapname, fromsnap) == 0)
fromguid = thisguid;
}
/* check for delete */
if (stream_nvfs == NULL) {
if (!flags->force)
continue;
error = recv_destroy(hdl, fsname, strlen(tofs)+1,
newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
nvlist_add_boolean(deleted, guidname);
continue;
}
if (fromguid == 0) {
if (flags->verbose) {
(void) printf("local fs %s does not have "
"fromsnap (%s in stream); must have "
"been deleted locally; ignoring\n",
fsname, fromsnap);
}
continue;
}
stream_fsname = fnvlist_lookup_string(stream_nvfs, "name");
stream_parent_fromsnap_guid = fnvlist_lookup_uint64(
stream_nvfs, "parentfromsnap");
s1 = strrchr(fsname, '/');
s2 = strrchr(stream_fsname, '/');
/*
* Check if we're going to rename based on a parent guid change
* and whether the current parent guid was also deleted. If it
* was, the rename will fail and is likely unneeded, so avoid it and
* force an early retry to determine the new
* parent_fromsnap_guid.
*/
if (stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
if (nvlist_exists(deleted, guidname)) {
progress = B_TRUE;
needagain = B_TRUE;
goto doagain;
}
}
/*
* Check for rename. If the exact receive path is specified, it
* does not count as a rename, but we still need to check the
* datasets beneath it.
*/
if ((stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
((flags->isprefix || strcmp(tofs, fsname) != 0) &&
(s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
nvlist_t *parent;
char tryname[ZFS_MAX_DATASET_NAME_LEN];
parent = fsavl_find(local_avl,
stream_parent_fromsnap_guid, NULL);
/*
* NB: parent might not be found if we used the
* tosnap for stream_parent_fromsnap_guid,
* because the parent is a newly-created fs;
* we'll be able to rename it after we recv the
* new fs.
*/
if (parent != NULL) {
char *pname;
pname = fnvlist_lookup_string(parent, "name");
(void) snprintf(tryname, sizeof (tryname),
"%s%s", pname, strrchr(stream_fsname, '/'));
} else {
tryname[0] = '\0';
if (flags->verbose) {
(void) printf("local fs %s new parent "
"not found\n", fsname);
}
}
newname[0] = '\0';
error = recv_rename(hdl, fsname, tryname,
strlen(tofs)+1, newname, flags);
if (renamed != NULL && newname[0] != '\0') {
fnvlist_add_boolean(renamed, newname);
}
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
}
doagain:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
fnvlist_free(deleted);
if (needagain && progress) {
/* do another pass to fix up temporary names */
if (flags->verbose)
(void) printf("another pass:\n");
goto again;
}
return (needagain || error != 0);
}
static int
zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
char **top_zfs, nvlist_t *cmdprops)
{
nvlist_t *stream_nv = NULL;
avl_tree_t *stream_avl = NULL;
char *fromsnap = NULL;
char *sendsnap = NULL;
char *cp;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
char sendfs[ZFS_MAX_DATASET_NAME_LEN];
char errbuf[1024];
dmu_replay_record_t drre;
int error;
boolean_t anyerr = B_FALSE;
boolean_t softerr = B_FALSE;
boolean_t recursive, raw;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
assert(drr->drr_type == DRR_BEGIN);
assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
/*
* Read in the nvlist from the stream.
*/
if (drr->drr_payloadlen != 0) {
error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
&stream_nv, flags->byteswap, zc);
if (error) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
}
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0);
if (recursive && strchr(destname, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot stream"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
/*
* Read in the end record and verify checksum.
*/
if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
flags->byteswap, NULL)))
goto out;
if (flags->byteswap) {
drre.drr_type = BSWAP_32(drre.drr_type);
drre.drr_u.drr_end.drr_checksum.zc_word[0] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
drre.drr_u.drr_end.drr_checksum.zc_word[1] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
drre.drr_u.drr_end.drr_checksum.zc_word[2] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
drre.drr_u.drr_end.drr_checksum.zc_word[3] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
}
if (drre.drr_type != DRR_END) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incorrect header checksum"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
if (drr->drr_payloadlen != 0) {
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"couldn't allocate avl tree"));
error = zfs_error(hdl, EZFS_NOMEM, errbuf);
goto out;
}
if (fromsnap != NULL && recursive) {
nvlist_t *renamed = NULL;
nvpair_t *pair = NULL;
(void) strlcpy(tofs, destname, sizeof (tofs));
if (flags->isprefix) {
struct drr_begin *drrb = &drr->drr_u.drr_begin;
int i;
if (flags->istail) {
cp = strrchr(drrb->drr_toname, '/');
if (cp == NULL) {
(void) strlcat(tofs, "/",
sizeof (tofs));
i = 0;
} else {
i = (cp - drrb->drr_toname);
}
} else {
i = strcspn(drrb->drr_toname, "/@");
}
/* zfs_receive_one() will create_parents() */
(void) strlcat(tofs, &drrb->drr_toname[i],
sizeof (tofs));
*strchr(tofs, '@') = '\0';
}
if (!flags->dryrun && !flags->nomount) {
renamed = fnvlist_alloc();
}
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, renamed);
/* Unmount renamed filesystems before receiving. */
while ((pair = nvlist_next_nvpair(renamed,
pair)) != NULL) {
zfs_handle_t *zhp;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, nvpair_name(pair),
ZFS_TYPE_FILESYSTEM);
if (zhp != NULL) {
clp = changelist_gather(zhp,
ZFS_PROP_MOUNTPOINT, 0,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp != NULL) {
softerr |=
changelist_prefix(clp);
changelist_free(clp);
}
}
}
fnvlist_free(renamed);
}
}
/*
* Get the fs specified by the first path in the stream (the top level
* specified by 'zfs send') and pass it to each invocation of
* zfs_receive_one().
*/
(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
sizeof (sendfs));
if ((cp = strchr(sendfs, '@')) != NULL) {
*cp = '\0';
/*
* Find the "sendsnap", the final snapshot in a replication
* stream. zfs_receive_one() handles certain errors
* differently, depending on if the contained stream is the
* last one or not.
*/
sendsnap = (cp + 1);
}
/* Finally, receive each contained stream */
do {
/*
* We should figure out if the contained stream has a recoverable
* error, in which case do a recv_skip() and drive on.
* Note, if we fail due to already having this guid,
* zfs_receive_one() will take care of it (i.e.,
* recv_skip() and return 0).
*/
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
sendfs, stream_nv, stream_avl, top_zfs, sendsnap, cmdprops);
if (error == ENODATA) {
error = 0;
break;
}
anyerr |= error;
} while (error == 0);
if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) {
/*
* Now that we have the fs's they sent us, try the
* renames again.
*/
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, NULL);
}
if (raw && softerr == 0 && *top_zfs != NULL) {
softerr = recv_fix_encryption_hierarchy(hdl, *top_zfs,
stream_nv, stream_avl);
}
out:
fsavl_destroy(stream_avl);
fnvlist_free(stream_nv);
if (softerr)
error = -2;
if (anyerr)
error = -1;
return (error);
}
static void
trunc_prop_errs(int truncated)
{
ASSERT(truncated != 0);
if (truncated == 1)
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"1 more property could not be set\n"));
else
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"%d more properties could not be set\n"), truncated);
}
static int
recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
{
dmu_replay_record_t *drr;
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
uint64_t payload_size;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* XXX would be great to use lseek if possible... */
drr = buf;
while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
byteswap, NULL) == 0) {
if (byteswap)
drr->drr_type = BSWAP_32(drr->drr_type);
switch (drr->drr_type) {
case DRR_BEGIN:
if (drr->drr_payloadlen != 0) {
(void) recv_read(hdl, fd, buf,
drr->drr_payloadlen, B_FALSE, NULL);
}
break;
case DRR_END:
free(buf);
return (0);
case DRR_OBJECT:
if (byteswap) {
drr->drr_u.drr_object.drr_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_bonuslen);
drr->drr_u.drr_object.drr_raw_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_raw_bonuslen);
}
payload_size =
DRR_OBJECT_PAYLOAD_SIZE(&drr->drr_u.drr_object);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE:
if (byteswap) {
drr->drr_u.drr_write.drr_logical_size =
BSWAP_64(
drr->drr_u.drr_write.drr_logical_size);
drr->drr_u.drr_write.drr_compressed_size =
BSWAP_64(
drr->drr_u.drr_write.drr_compressed_size);
}
payload_size =
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
assert(payload_size <= SPA_MAXBLOCKSIZE);
(void) recv_read(hdl, fd, buf,
payload_size, B_FALSE, NULL);
break;
case DRR_SPILL:
if (byteswap) {
drr->drr_u.drr_spill.drr_length =
BSWAP_64(drr->drr_u.drr_spill.drr_length);
drr->drr_u.drr_spill.drr_compressed_size =
BSWAP_64(drr->drr_u.drr_spill.
drr_compressed_size);
}
payload_size =
DRR_SPILL_PAYLOAD_SIZE(&drr->drr_u.drr_spill);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE_EMBEDDED:
if (byteswap) {
drr->drr_u.drr_write_embedded.drr_psize =
BSWAP_32(drr->drr_u.drr_write_embedded.
drr_psize);
}
(void) recv_read(hdl, fd, buf,
P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize,
8), B_FALSE, NULL);
break;
case DRR_OBJECT_RANGE:
case DRR_WRITE_BYREF:
case DRR_FREEOBJECTS:
case DRR_FREE:
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type"));
free(buf);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
}
free(buf);
return (-1);
}
static void
recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap,
boolean_t resumable, boolean_t checksum)
{
char target_fs[ZFS_MAX_DATASET_NAME_LEN];
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, (checksum ?
"checksum mismatch" : "incomplete stream")));
if (!resumable)
return;
(void) strlcpy(target_fs, target_snap, sizeof (target_fs));
*strchr(target_fs, '@') = '\0';
zfs_handle_t *zhp = zfs_open(hdl, target_fs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return;
char token_buf[ZFS_MAXPROPLEN];
int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf),
NULL, NULL, 0, B_TRUE);
if (error == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"checksum mismatch or incomplete stream.\n"
"Partially received snapshot is saved.\n"
"A resuming stream can be generated on the sending "
"system by running:\n"
" zfs send -t %s"),
token_buf);
}
zfs_close(zhp);
}
/*
* Prepare a new nvlist of properties that are to override (-o) or be excluded
* (-x) from the received dataset
* recvprops: received properties from the send stream
* cmdprops: raw input properties from command line
* origprops: properties, both locally-set and received, currently set on the
* target dataset if it exists, NULL otherwise.
* oxprops: valid output override (-o) and excluded (-x) properties
*/
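/*
* Illustrative sketch (editor's addition): how a caller might encode
* "-o compression=lz4 -x mountpoint" into the cmdprops nvlist consumed
* here. Overrides (-o) arrive as string pairs and exclusions (-x) as
* boolean pairs, matching the DATA_TYPE_STRING and DATA_TYPE_BOOLEAN cases
* handled below.
*
*     nvlist_t *cmdprops = fnvlist_alloc();
*     fnvlist_add_string(cmdprops, "compression", "lz4");   // -o
*     fnvlist_add_boolean(cmdprops, "mountpoint");          // -x
*     // pass cmdprops down the receive path before freeing it
*     fnvlist_free(cmdprops);
*/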
static int
zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type,
char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs,
boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops,
nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out,
uint_t *wkeylen_out, const char *errbuf)
{
nvpair_t *nvp;
nvlist_t *oprops, *voprops;
zfs_handle_t *zhp = NULL;
zpool_handle_t *zpool_hdl = NULL;
char *cp;
int ret = 0;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
if (nvlist_empty(cmdprops))
return (0); /* No properties to override or exclude */
*oxprops = fnvlist_alloc();
oprops = fnvlist_alloc();
strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN);
/*
* Get our dataset handle. The target dataset may not exist yet.
*/
if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) {
zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET);
if (zhp == NULL) {
ret = -1;
goto error;
}
}
/* open the zpool handle */
cp = strchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
zpool_hdl = zpool_open(hdl, namebuf);
if (zpool_hdl == NULL) {
ret = -1;
goto error;
}
/* restore namebuf to match fsname for later use */
if (cp != NULL)
*cp = '/';
/*
* first iteration: process excluded (-x) properties now and gather
* added (-o) properties to be later processed by zfs_valid_proplist()
*/
nvp = NULL;
while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
/* "origin" is processed separately, don't handle it here */
if (prop == ZFS_PROP_ORIGIN)
continue;
/* raw streams can't override encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set or excluded for raw streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/* incremental streams can only exclude encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && !newfs &&
nvpair_type(nvp) != DATA_TYPE_BOOLEAN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set for incremental streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
switch (nvpair_type(nvp)) {
case DATA_TYPE_BOOLEAN: /* -x property */
/*
* DATA_TYPE_BOOLEAN is the way we're asked to "exclude"
* a property: this is done by forcing an explicit
* inherit on the destination so the effective value is
* not the one we received from the send stream.
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"Warning: %s: property '%s' does not "
"apply to datasets of this type\n"),
fsname, name);
continue;
}
/*
* We do this only if the property is not already
* locally-set, in which case its value will take
* priority over the received anyway.
*/
if (nvlist_exists(origprops, name)) {
nvlist_t *attrs;
char *source = NULL;
attrs = fnvlist_lookup_nvlist(origprops, name);
if (nvlist_lookup_string(attrs,
ZPROP_SOURCE, &source) == 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
}
/*
* We can't force an explicit inherit on non-inheritable
* properties: if we're asked to exclude these kinds of
* values, we remove them from the "recvprops" input nvlist.
*/
if (!zfs_prop_inheritable(prop) &&
!zfs_prop_user(name) && /* can be inherited too */
nvlist_exists(recvprops, name))
fnvlist_remove(recvprops, name);
else
fnvlist_add_nvpair(*oxprops, nvp);
break;
case DATA_TYPE_STRING: /* -o property=value */
/*
* we're trying to override a property that does not
* make sense for this type of dataset, but we don't
* want to fail if the receive is recursive: this comes
* in handy when the send stream contains, for
* instance, a child ZVOL and we're trying to receive
* it with "-o atime=on"
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
if (recursive)
continue;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' does not apply to datasets "
"of this type"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
fnvlist_add_nvpair(oprops, nvp);
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be a string or boolean"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
if (toplevel) {
/* convert override strings properties to native */
if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET,
oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) {
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* zfs_crypto_create() requires the parent name. Get it
* by truncating the fsname copy stored in namebuf.
*/
cp = strrchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
if (!raw && zfs_crypto_create(hdl, namebuf, voprops, NULL,
B_FALSE, wkeydata_out, wkeylen_out) != 0) {
fnvlist_free(voprops);
ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto error;
}
/* second pass: process "-o" properties */
fnvlist_merge(*oxprops, voprops);
fnvlist_free(voprops);
} else {
/* override props on child dataset are inherited */
nvp = NULL;
while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
fnvlist_add_boolean(*oxprops, name);
}
}
error:
if (zhp != NULL)
zfs_close(zhp);
if (zpool_hdl != NULL)
zpool_close(zpool_hdl);
fnvlist_free(oprops);
return (ret);
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
*/
static int
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
time_t begin_time;
int ioctl_err, ioctl_errno, err;
char *cp;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
char errbuf[1024];
const char *chopprefix;
boolean_t newfs = B_FALSE;
boolean_t stream_wantsnewfs, stream_resumingnewfs;
boolean_t newprops = B_FALSE;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
uint64_t parent_snapguid = 0;
prop_changelist_t *clp = NULL;
nvlist_t *snapprops_nvlist = NULL;
nvlist_t *snapholds_nvlist = NULL;
zprop_errflags_t prop_errflags;
nvlist_t *prop_errors = NULL;
boolean_t recursive;
char *snapname = NULL;
char destsnap[MAXPATHLEN * 2];
char origin[MAXNAMELEN];
char name[MAXPATHLEN];
char tmp_keylocation[MAXNAMELEN];
nvlist_t *rcvprops = NULL; /* props received from the send stream */
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
nvlist_t *origprops = NULL; /* original props (if destination exists) */
zfs_type_t type;
boolean_t toplevel = B_FALSE;
boolean_t zoned = B_FALSE;
boolean_t hastoken = B_FALSE;
boolean_t redacted;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
begin_time = time(NULL);
bzero(origin, MAXNAMELEN);
bzero(tmp_keylocation, MAXNAMELEN);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
/* Did the user request holds be skipped via zfs recv -k? */
boolean_t holds = flags->holds && !flags->skipholds;
if (stream_avl != NULL) {
char *keylocation = NULL;
nvlist_t *lookup = NULL;
nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
&snapname);
(void) nvlist_lookup_uint64(fs, "parentfromsnap",
&parent_snapguid);
err = nvlist_lookup_nvlist(fs, "props", &rcvprops);
if (err) {
rcvprops = fnvlist_alloc();
newprops = B_TRUE;
}
/*
* The keylocation property may only be set on encryption roots,
* but this dataset might not become an encryption root until
* recv_fix_encryption_hierarchy() is called. That function
* will fixup the keylocation anyway, so we temporarily unset
* the keylocation for now to avoid any errors from the receive
* ioctl.
*/
err = nvlist_lookup_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (err == 0) {
strcpy(tmp_keylocation, keylocation);
(void) nvlist_remove_all(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
}
if (flags->canmountoff) {
fnvlist_add_uint64(rcvprops,
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0);
} else if (newprops) { /* nothing in rcvprops, eliminate it */
fnvlist_free(rcvprops);
rcvprops = NULL;
newprops = B_FALSE;
}
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
snapprops_nvlist = fnvlist_lookup_nvlist(lookup,
snapname);
}
if (holds) {
if (0 == nvlist_lookup_nvlist(fs, "snapholds",
&lookup)) {
snapholds_nvlist = fnvlist_lookup_nvlist(
lookup, snapname);
}
}
}
cp = NULL;
/*
* Determine how much of the snapshot name stored in the stream
* we are going to tack on to the name they specified on the
* command line, and how much we are going to chop off.
*
* If they specified a snapshot, chop the entire name stored in
* the stream.
*/
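/*
* Worked example (editor's addition; names are hypothetical): for a stream
* of "tank/a/b@snap" received into "pool/dest", -e (istail) keeps only the
* last path element, giving pool/dest/b@snap; -d (isprefix) chops only the
* pool name, giving pool/dest/a/b@snap; with neither flag, only what
* follows the sent fs is appended; and if a snapshot such as
* "pool/dest@snap" was given, the entire sent name is chopped.
*/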
if (flags->istail) {
/*
* A filesystem was specified with -e. We want to tack on only
* the tail of the sent snapshot path.
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -e"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strrchr(sendfs, '/');
if (chopprefix == NULL) {
/*
* The tail is the poolname, so we need to
* prepend a path separator.
*/
int len = strlen(drrb->drr_toname);
cp = malloc(len + 2);
cp[0] = '/';
(void) strcpy(&cp[1], drrb->drr_toname);
chopprefix = cp;
} else {
chopprefix = drrb->drr_toname + (chopprefix - sendfs);
}
} else if (flags->isprefix) {
/*
* A filesystem was specified with -d. We want to tack on
* everything but the first element of the sent snapshot path
* (all but the pool name).
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -d"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strchr(drrb->drr_toname, '/');
if (chopprefix == NULL)
chopprefix = strchr(drrb->drr_toname, '@');
} else if (strchr(tosnap, '@') == NULL) {
/*
* If a filesystem was specified without -d or -e, we want to
* tack on everything after the fs specified by 'zfs send'.
*/
chopprefix = drrb->drr_toname + strlen(sendfs);
} else {
/* A snapshot was specified as an exact path (no -d or -e). */
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot "
"stream"));
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
}
ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL);
ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) ||
strchr(sendfs, '/') == NULL);
ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
chopprefix[0] == '\0');
/*
* Determine name of destination snapshot.
*/
(void) strlcpy(destsnap, tosnap, sizeof (destsnap));
(void) strlcat(destsnap, chopprefix, sizeof (destsnap));
free(cp);
if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) {
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
/*
* Determine the name of the origin snapshot.
*/
if (originsnap) {
(void) strlcpy(origin, originsnap, sizeof (origin));
if (flags->verbose)
(void) printf("using provided clone origin %s\n",
origin);
} else if (drrb->drr_flags & DRR_FLAG_CLONE) {
if (guid_to_name(hdl, destsnap,
drrb->drr_fromguid, B_FALSE, origin) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local origin for clone %s does not exist"),
destsnap);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
if (flags->verbose)
(void) printf("found clone origin %s\n", origin);
}
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_DEDUP)) {
(void) fprintf(stderr,
gettext("ERROR: \"zfs receive\" no longer supports "
"deduplicated send streams. Use\n"
"the \"zstream redup\" command to convert this stream "
"to a regular,\n"
"non-deduplicated stream.\n"));
err = zfs_error(hdl, EZFS_NOTSUP, errbuf);
goto out;
}
boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RESUMING;
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW;
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA;
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;
stream_resumingnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && resuming;
if (stream_wantsnewfs) {
/*
* if the parent fs does not exist, look for it based on
* the parent snap GUID
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive new filesystem stream"));
(void) strcpy(name, destsnap);
cp = strrchr(name, '/');
if (cp)
*cp = '\0';
if (cp &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char suffix[ZFS_MAX_DATASET_NAME_LEN];
(void) strcpy(suffix, strrchr(destsnap, '/'));
if (guid_to_name(hdl, name, parent_snapguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strcat(destsnap, suffix);
}
}
} else {
/*
* If the fs does not exist, look for it based on the
* fromsnap GUID.
*/
if (resuming) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive resume stream"));
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive incremental stream"));
}
(void) strcpy(name, destsnap);
*strchr(name, '@') = '\0';
/*
* If the exact receive path was specified and this is the
* topmost path in the stream, then if the fs does not exist we
* should look no further.
*/
if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char snap[ZFS_MAX_DATASET_NAME_LEN];
(void) strcpy(snap, strchr(destsnap, '@'));
if (guid_to_name(hdl, name, drrb->drr_fromguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strcat(destsnap, snap);
}
}
}
(void) strcpy(name, destsnap);
*strchr(name, '@') = '\0';
redacted = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_REDACTED;
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp;
boolean_t encrypted;
(void) strcpy(zc.zc_name, name);
/*
* Destination fs exists. It must be one of these cases:
* - an incremental send stream
* - the stream specifies a new fs (full stream or clone)
* and they want us to blow away the existing fs (and
* have therefore specified -F and removed any snapshots)
* - we are resuming a failed receive.
*/
if (stream_wantsnewfs) {
boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL;
if (!flags->force) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' exists\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (zfs_ioctl(hdl, ZFS_IOC_SNAPSHOT_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has snapshots (eg. %s)\n"
"must destroy them to overwrite it"),
zc.zc_name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume && strrchr(name, '/') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s is the root dataset\n"
"cannot overwrite with a ZVOL"),
name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume &&
zfs_ioctl(hdl, ZFS_IOC_DATASET_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has children (eg. %s)\n"
"cannot overwrite with a ZVOL"),
zc.zc_name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
}
if ((zhp = zfs_open(hdl, name,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
err = -1;
goto out;
}
if (stream_wantsnewfs &&
zhp->zfs_dmustats.dds_origin[0]) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' is a clone\n"
"must destroy it to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
/*
* Raw sends cannot be performed as an incremental on top
* of existing unencrypted datasets. zfs recv -F can't be
* used to blow away an existing encrypted filesystem. This
* is because it would require the dsl dir to point to the
* new key (or lack of a key) and the old key at the same
* time. The -F flag may still be used for deleting
* intermediate snapshots that would otherwise prevent the
* receive from working.
*/
encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) !=
ZIO_CRYPT_OFF;
if (!stream_wantsnewfs && !encrypted && raw) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot perform raw receive on top of "
"existing unencrypted dataset"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (stream_wantsnewfs && flags->force &&
((raw && !encrypted) || encrypted)) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive -F cannot be used to destroy an "
"encrypted filesystem or overwrite an "
"unencrypted one with an encrypted one"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
(stream_wantsnewfs || stream_resumingnewfs)) {
/* We can't do online recv in this case */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->forceunmount ? MS_FORCE : 0);
if (clp == NULL) {
zfs_close(zhp);
err = -1;
goto out;
}
if (changelist_prefix(clp) != 0) {
changelist_free(clp);
zfs_close(zhp);
err = -1;
goto out;
}
}
/*
* If we are resuming a newfs, set newfs here so that we will
* mount it if the recv succeeds this time. We can tell
* that it was a newfs on the first recv because the fs
* itself will be inconsistent (if the fs existed when we
* did the first recv, we would have received it into
* .../%recv).
*/
if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT))
newfs = B_TRUE;
/* we want to know if we're zoned when validating -o|-x props */
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
/* may need this info later; get it now while we have zhp around */
if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
NULL, NULL, 0, B_TRUE) == 0)
hastoken = B_TRUE;
/* gather existing properties on destination */
origprops = fnvlist_alloc();
fnvlist_merge(origprops, zhp->zfs_props);
fnvlist_merge(origprops, zhp->zfs_user_props);
zfs_close(zhp);
} else {
zfs_handle_t *zhp;
/*
* Destination filesystem does not exist. Therefore we better
* be creating a new filesystem (either from a full backup, or
* a clone). It would therefore be invalid if the user
* specified only the pool name (i.e. if the destination name
* contained no slash character).
*/
cp = strrchr(name, '/');
if (!stream_wantsnewfs || cp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' does not exist"), name);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
/*
* Trim off the final dataset component so we perform the
* recvbackup ioctl to the filesystem's parent.
*/
*cp = '\0';
if (flags->isprefix && !flags->istail && !flags->dryrun &&
create_parents(hdl, destsnap, strlen(tosnap)) != 0) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
/* validate parent */
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
zfs_close(zhp);
goto out;
}
zfs_close(zhp);
newfs = B_TRUE;
*cp = '/';
}
if (flags->verbose) {
(void) printf("%s %s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);
}
/*
* If this is the top-level dataset, record it so we can use it
* for recursive operations later.
*/
if (top_zfs != NULL &&
(*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) {
toplevel = B_TRUE;
if (*top_zfs == NULL)
*top_zfs = zfs_strdup(hdl, name);
}
if (drrb->drr_type == DMU_OST_ZVOL) {
type = ZFS_TYPE_VOLUME;
} else if (drrb->drr_type == DMU_OST_ZFS) {
type = ZFS_TYPE_FILESYSTEM;
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type: 0x%d"), drrb->drr_type);
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive,
stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops,
&oxprops, &wkeydata, &wkeylen, errbuf)) != 0)
goto out;
/*
* When sending with properties (zfs send -p), the encryption property
* is not included because it is a SETONCE property and therefore
* treated as read only. However, we are always able to determine its
* value because raw sends will include it in the DRR_BEGIN payload
* and non-raw sends with properties are not allowed for encrypted
* datasets. Therefore, if this is a non-raw properties stream, we can
* infer that the value should be ZIO_CRYPT_OFF and manually add that
* to the received properties.
*/
if (stream_wantsnewfs && !raw && rcvprops != NULL &&
!nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) {
if (oxprops == NULL)
oxprops = fnvlist_alloc();
fnvlist_add_uint64(oxprops,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), ZIO_CRYPT_OFF);
}
if (flags->dryrun) {
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
/*
* We have read the DRR_BEGIN record, but we have
* not yet read the payload. For non-dryrun sends
* this will be done by the kernel, so we must
* emulate that here, before attempting to read
* more records.
*/
err = recv_read(hdl, infd, buf, drr->drr_payloadlen,
flags->byteswap, NULL);
free(buf);
if (err != 0)
goto out;
err = recv_skip(hdl, infd, flags->byteswap);
goto out;
}
err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force, flags->resumable,
raw, infd, drr_noswap, -1, &read_bytes, &errflags,
NULL, &prop_errors);
ioctl_errno = ioctl_err;
prop_errflags = errflags;
if (err == 0) {
nvpair_t *prop_err = NULL;
while ((prop_err = nvlist_next_nvpair(prop_errors,
prop_err)) != NULL) {
char tbuf[1024];
zfs_prop_t prop;
int intval;
prop = zfs_name_to_prop(nvpair_name(prop_err));
(void) nvpair_value_int32(prop_err, &intval);
if (strcmp(nvpair_name(prop_err),
ZPROP_N_MORE_ERRORS) == 0) {
trunc_prop_errs(intval);
break;
} else if (snapname == NULL || finalsnap == NULL ||
strcmp(finalsnap, snapname) == 0 ||
strcmp(nvpair_name(prop_err),
zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) {
/*
* Skip the special case of, for example,
* "refquota", errors on intermediate
* snapshots leading up to a final one.
* That's why we have all of the checks above.
*
* See zfs_ioctl.c's extract_delay_props() for
* a list of props which can fail on
* intermediate snapshots, but shouldn't
* affect the overall receive.
*/
(void) snprintf(tbuf, sizeof (tbuf),
dgettext(TEXT_DOMAIN,
"cannot receive %s property on %s"),
nvpair_name(prop_err), name);
zfs_setprop_error(hdl, prop, intval, tbuf);
}
}
}
if (err == 0 && snapprops_nvlist) {
zfs_cmd_t zc = {"\0"};
(void) strcpy(zc.zc_name, destsnap);
zc.zc_cookie = B_TRUE; /* received */
if (zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist) == 0) {
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
}
if (err == 0 && snapholds_nvlist) {
nvpair_t *pair;
nvlist_t *holds, *errors = NULL;
int cleanup_fd = -1;
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
fnvlist_add_string(holds, destsnap, nvpair_name(pair));
}
(void) lzc_hold(holds, cleanup_fd, &errors);
fnvlist_free(snapholds_nvlist);
fnvlist_free(holds);
}
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
/*
* It may be that this snapshot already exists,
* in which case we want to consume & ignore it
* rather than failing.
*/
avl_tree_t *local_avl;
nvlist_t *local_nv, *fs;
cp = strchr(destsnap, '@');
/*
* XXX Do this faster by just iterating over snaps in
* this fs. Also if zc_value does not exist, we will
* get a strange "does not exist" error message.
*/
*cp = '\0';
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE,
B_TRUE, &local_nv, &local_avl) == 0) {
*cp = '@';
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
if (fs != NULL) {
if (flags->verbose) {
(void) printf("snap %s already exists; "
"ignoring\n", destsnap);
}
err = ioctl_err = recv_skip(hdl, infd,
flags->byteswap);
}
}
*cp = '@';
}
if (ioctl_err != 0) {
switch (ioctl_errno) {
case ENODEV:
cp = strchr(destsnap, '@');
*cp = '\0';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"most recent snapshot of %s does not\n"
"match incremental source"), destsnap);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
*cp = '@';
break;
case ETXTBSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s has been modified\n"
"since most recent snapshot"), name);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
break;
case EACCES:
if (raw && stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create encryption key"));
} else if (raw && !stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key does not match "
"existing key"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"inherited key must be loaded"));
}
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
break;
case EEXIST:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination already exists"));
(void) zfs_error_fmt(hdl, EZFS_EXISTS,
dgettext(TEXT_DOMAIN, "cannot restore to %s"),
destsnap);
*cp = '@';
break;
case EINVAL:
if (flags->resumable) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"kernel modules must be upgraded to "
"receive this stream."));
} else if (embedded && !raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incompatible embedded data stream "
"feature with encrypted receive."));
}
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ECKSUM:
case ZFS_ERR_STREAM_TRUNCATED:
recv_ecksum_set_aux(hdl, destsnap, flags->resumable,
ioctl_err == ECKSUM);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental send stream requires -L "
"(--large-block), to match previous receive."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to receive this stream."));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EDQUOT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s space quota exceeded."), name);
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid missing. See errata %u at "
"https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-ER."),
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid mismatch. See the 'zfs receive' "
"man page section\n discussing the limitations "
"of raw encrypted send streams."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_SPILL_BLOCK_FLAG_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Spill block flag missing for raw send.\n"
"The zfs software on the sending system must "
"be updated."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EBUSY:
if (hastoken) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s contains "
"partially-complete state from "
"\"zfs receive -s\"."), name);
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
}
fallthrough;
default:
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
}
}
/*
* Mount the target filesystem (if created). Also mount any
* children of the target filesystem if we did a replication
* receive (indicated by stream_avl being non-NULL).
*/
if (clp) {
if (!flags->nomount)
err |= changelist_postfix(clp);
changelist_free(clp);
}
if ((newfs || stream_avl) && type == ZFS_TYPE_FILESYSTEM && !redacted)
flags->domount = B_TRUE;
if (prop_errflags & ZPROP_ERR_NOCLEAR) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to clear unreceived properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (prop_errflags & ZPROP_ERR_NORESTORE) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to restore original properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (err || ioctl_err) {
err = -1;
goto out;
}
if (flags->verbose) {
char buf1[64];
char buf2[64];
uint64_t bytes = read_bytes;
time_t delta = time(NULL) - begin_time;
if (delta == 0)
delta = 1;
zfs_nicebytes(bytes, buf1, sizeof (buf1));
zfs_nicebytes(bytes/delta, buf2, sizeof (buf2));
(void) printf("received %s stream in %lld seconds (%s/sec)\n",
buf1, (longlong_t)delta, buf2);
}
err = 0;
out:
if (prop_errors != NULL)
fnvlist_free(prop_errors);
if (tmp_keylocation[0] != '\0') {
fnvlist_add_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation);
}
if (newprops)
fnvlist_free(rcvprops);
fnvlist_free(oxprops);
fnvlist_free(origprops);
return (err);
}
/*
* Check properties we were asked to override (both -o|-x)
*/
static boolean_t
zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props,
const char *errbuf)
{
nvpair_t *nvp;
zfs_prop_t prop;
const char *name;
nvp = NULL;
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
name = nvpair_name(nvp);
prop = zfs_name_to_prop(name);
if (prop == ZPROP_INVAL) {
if (!zfs_prop_user(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), name);
return (B_FALSE);
}
continue;
}
/*
* "origin" is readonly but is used to receive datasets as
* clones so we don't raise an error here
*/
if (prop == ZFS_PROP_ORIGIN)
continue;
/* encryption params have their own verification later */
if (prop == ZFS_PROP_ENCRYPTION ||
zfs_prop_encryption_key_param(prop))
continue;
/*
* cannot override readonly, set-once and other specific
* settable properties
*/
if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION ||
prop == ZFS_PROP_VOLSIZE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), name);
return (B_FALSE);
}
}
return (B_TRUE);
}
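/*
 * Illustrative sketch (not part of the library): the kind of -o/-x override
 * list the check above accepts or rejects.  The helper name and property
 * values are hypothetical.
 */
#if 0
static boolean_t
example_checkprops(libzfs_handle_t *hdl)
{
	nvlist_t *cmdprops = fnvlist_alloc();
	boolean_t ok;

	fnvlist_add_string(cmdprops, "compression", "lz4"); /* settable: ok */
	fnvlist_add_string(cmdprops, "origin", "tank/base@snap"); /* clone origin: ok */
	/* adding "version" or "volsize" here would make the check fail */
	ok = zfs_receive_checkprops(hdl, cmdprops, "cannot receive");
	fnvlist_free(cmdprops);
	return (ok);
}
#endif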
static int
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
int err;
dmu_replay_record_t drr, drr_noswap;
struct drr_begin *drrb = &drr.drr_u.drr_begin;
char errbuf[1024];
zio_cksum_t zcksum = { { 0 } };
uint64_t featureflags;
int hdrtype;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* check cmdline props, raise an error if they cannot be received */
if (!zfs_receive_checkprops(hdl, cmdprops, errbuf)) {
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (flags->isprefix &&
!zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
"(%s) does not exist"), tosnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
if (originsnap &&
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
"(%s) does not exist"), originsnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* read in the BEGIN record */
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
&zcksum)))
return (err);
if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
/* It's the double end record at the end of a package */
return (ENODATA);
}
/* the kernel needs the non-byteswapped begin record */
drr_noswap = drr;
flags->byteswap = B_FALSE;
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
/*
* We computed the checksum in the wrong byteorder in
* recv_read() above; do it again correctly.
*/
bzero(&zcksum, sizeof (zio_cksum_t));
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
flags->byteswap = B_TRUE;
drr.drr_type = BSWAP_32(drr.drr_type);
drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
drrb->drr_type = BSWAP_32(drrb->drr_type);
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
}
if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad magic number)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
if (!DMU_STREAM_SUPPORTED(featureflags) ||
(hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has unsupported feature, feature flags = %llx"),
(unsigned long long)featureflags);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
/* Holds feature is set once in the compound stream header. */
if (featureflags & DMU_BACKUP_FEATURE_HOLDS)
flags->holds = B_TRUE;
if (strchr(drrb->drr_toname, '@') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad snapshot name)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN];
if (sendfs == NULL) {
/*
* We were not called from zfs_receive_package(). Get
* the fs specified by 'zfs send'.
*/
char *cp;
(void) strlcpy(nonpackage_sendfs,
drr.drr_u.drr_begin.drr_toname,
sizeof (nonpackage_sendfs));
if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
*cp = '\0';
sendfs = nonpackage_sendfs;
VERIFY(finalsnap == NULL);
}
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
finalsnap, cmdprops));
} else {
assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
&zcksum, top_zfs, cmdprops));
}
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
* Return 0 on total success, -2 if some things couldn't be
* destroyed/renamed/promoted, -1 if some things couldn't be received.
* (-1 overrides -2; if -1 is returned and the resumable flag was specified,
* the transfer can be resumed if the sending side supports it).
*/
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
char *top_zfs = NULL;
int err;
struct stat sb;
char *originsnap = NULL;
/*
* The only way fstat can fail is if we do not have a valid file
* descriptor.
*/
if (fstat(infd, &sb) == -1) {
perror("fstat");
return (-2);
}
/*
* It is not uncommon for gigabytes to be processed in zfs receive.
* Speculatively increase the buffer size if supported by the platform.
*/
if (S_ISFIFO(sb.st_mode))
libzfs_set_pipe_max(infd);
if (props) {
err = nvlist_lookup_string(props, "origin", &originsnap);
if (err && err != ENOENT)
return (err);
}
err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
stream_avl, &top_zfs, NULL, props);
if (err == 0 && !flags->nomount && flags->domount && top_zfs) {
zfs_handle_t *zhp = NULL;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, top_zfs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
err = -1;
goto out;
} else {
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_MOUNT_ALWAYS,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL) {
err = -1;
goto out;
}
/* mount and share received datasets */
err = changelist_postfix(clp);
changelist_free(clp);
if (err != 0)
err = -1;
}
}
out:
if (top_zfs)
free(top_zfs);
return (err);
}
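/*
 * Illustrative sketch (not part of the library): how a caller might drive
 * zfs_receive() with a send stream already open on a file descriptor.  The
 * helper name and dataset name are hypothetical.
 */
#if 0
static int
example_receive_from_fd(libzfs_handle_t *hdl, int fd)
{
	recvflags_t flags = { 0 };

	flags.verbose = B_TRUE;		/* progress output, as with `zfs recv -v` */
	/* receive whatever is on 'fd' into the hypothetical dataset below */
	return (zfs_receive(hdl, "tank/backup", NULL, &flags, fd, NULL));
}
#endif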
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
index 6e57d8e42563..7dd38bb3d838 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
@@ -1,2102 +1,2110 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2020 Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2020 The FreeBSD Foundation
*
* Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* Internal utility routines for the ZFS library.
*/
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <math.h>
+#if LIBFETCH_DYNAMIC
+#include <dlfcn.h>
+#endif
#include <sys/stat.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include "libzfs_impl.h"
#include "zfs_prop.h"
#include "zfeature_common.h"
#include <zfs_fletcher.h>
#include <libzutil.h>
/*
* We only care about the scheme in order to match the scheme
* with the handler. Each handler should validate the full URI
* as necessary.
*/
#define URI_REGEX "^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
int
libzfs_errno(libzfs_handle_t *hdl)
{
return (hdl->libzfs_error);
}
const char *
libzfs_error_action(libzfs_handle_t *hdl)
{
return (hdl->libzfs_action);
}
const char *
libzfs_error_description(libzfs_handle_t *hdl)
{
if (hdl->libzfs_desc[0] != '\0')
return (hdl->libzfs_desc);
switch (hdl->libzfs_error) {
case EZFS_NOMEM:
return (dgettext(TEXT_DOMAIN, "out of memory"));
case EZFS_BADPROP:
return (dgettext(TEXT_DOMAIN, "invalid property value"));
case EZFS_PROPREADONLY:
return (dgettext(TEXT_DOMAIN, "read-only property"));
case EZFS_PROPTYPE:
return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
"datasets of this type"));
case EZFS_PROPNONINHERIT:
return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
case EZFS_PROPSPACE:
return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
case EZFS_BADTYPE:
return (dgettext(TEXT_DOMAIN, "operation not applicable to "
"datasets of this type"));
case EZFS_BUSY:
return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
case EZFS_EXISTS:
return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
case EZFS_NOENT:
return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
case EZFS_BADSTREAM:
return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
case EZFS_DSREADONLY:
return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
case EZFS_VOLTOOBIG:
return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
"this system"));
case EZFS_INVALIDNAME:
return (dgettext(TEXT_DOMAIN, "invalid name"));
case EZFS_BADRESTORE:
return (dgettext(TEXT_DOMAIN, "unable to restore to "
"destination"));
case EZFS_BADBACKUP:
return (dgettext(TEXT_DOMAIN, "backup failed"));
case EZFS_BADTARGET:
return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
case EZFS_NODEVICE:
return (dgettext(TEXT_DOMAIN, "no such device in pool"));
case EZFS_BADDEV:
return (dgettext(TEXT_DOMAIN, "invalid device"));
case EZFS_NOREPLICAS:
return (dgettext(TEXT_DOMAIN, "no valid replicas"));
case EZFS_RESILVERING:
return (dgettext(TEXT_DOMAIN, "currently resilvering"));
case EZFS_BADVERSION:
return (dgettext(TEXT_DOMAIN, "unsupported version or "
"feature"));
case EZFS_POOLUNAVAIL:
return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
case EZFS_DEVOVERFLOW:
return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
case EZFS_BADPATH:
return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
case EZFS_CROSSTARGET:
return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
"pools"));
case EZFS_ZONED:
return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
case EZFS_MOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "mount failed"));
case EZFS_UMOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "unmount failed"));
case EZFS_UNSHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share removal failed"));
case EZFS_SHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share creation failed"));
case EZFS_UNSHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share removal failed"));
case EZFS_SHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share creation failed"));
case EZFS_PERM:
return (dgettext(TEXT_DOMAIN, "permission denied"));
case EZFS_NOSPC:
return (dgettext(TEXT_DOMAIN, "out of space"));
case EZFS_FAULT:
return (dgettext(TEXT_DOMAIN, "bad address"));
case EZFS_IO:
return (dgettext(TEXT_DOMAIN, "I/O error"));
case EZFS_INTR:
return (dgettext(TEXT_DOMAIN, "signal received"));
case EZFS_ISSPARE:
return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
"spare"));
case EZFS_INVALCONFIG:
return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
case EZFS_RECURSIVE:
return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
case EZFS_NOHISTORY:
return (dgettext(TEXT_DOMAIN, "no history available"));
case EZFS_POOLPROPS:
return (dgettext(TEXT_DOMAIN, "failed to retrieve "
"pool properties"));
case EZFS_POOL_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this type of pool"));
case EZFS_POOL_INVALARG:
return (dgettext(TEXT_DOMAIN, "invalid argument for "
"this pool operation"));
case EZFS_NAMETOOLONG:
return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
case EZFS_OPENFAILED:
return (dgettext(TEXT_DOMAIN, "open failed"));
case EZFS_NOCAP:
return (dgettext(TEXT_DOMAIN,
"disk capacity information could not be retrieved"));
case EZFS_LABELFAILED:
return (dgettext(TEXT_DOMAIN, "write of label failed"));
case EZFS_BADWHO:
return (dgettext(TEXT_DOMAIN, "invalid user/group"));
case EZFS_BADPERM:
return (dgettext(TEXT_DOMAIN, "invalid permission"));
case EZFS_BADPERMSET:
return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
case EZFS_NODELEGATION:
return (dgettext(TEXT_DOMAIN, "delegated administration is "
"disabled on pool"));
case EZFS_BADCACHE:
return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
case EZFS_ISL2CACHE:
return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
case EZFS_VDEVNOTSUP:
return (dgettext(TEXT_DOMAIN, "vdev specification is not "
"supported"));
case EZFS_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this dataset"));
case EZFS_IOC_NOTSUPPORTED:
return (dgettext(TEXT_DOMAIN, "operation not supported by "
"zfs kernel module"));
case EZFS_ACTIVE_SPARE:
return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
"device"));
case EZFS_UNPLAYED_LOGS:
return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
"logs"));
case EZFS_REFTAG_RELE:
return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
case EZFS_REFTAG_HOLD:
return (dgettext(TEXT_DOMAIN, "tag already exists on this "
"dataset"));
case EZFS_TAGTOOLONG:
return (dgettext(TEXT_DOMAIN, "tag too long"));
case EZFS_PIPEFAILED:
return (dgettext(TEXT_DOMAIN, "pipe create failed"));
case EZFS_THREADCREATEFAILED:
return (dgettext(TEXT_DOMAIN, "thread create failed"));
case EZFS_POSTSPLIT_ONLINE:
return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
"into a new one"));
case EZFS_SCRUB_PAUSED:
return (dgettext(TEXT_DOMAIN, "scrub is paused; "
"use 'zpool scrub' to resume"));
case EZFS_SCRUBBING:
return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
"use 'zpool scrub -s' to cancel current scrub"));
case EZFS_NO_SCRUB:
return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
case EZFS_DIFF:
return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
case EZFS_DIFFDATA:
return (dgettext(TEXT_DOMAIN, "invalid diff data"));
case EZFS_POOLREADONLY:
return (dgettext(TEXT_DOMAIN, "pool is read-only"));
case EZFS_NO_PENDING:
return (dgettext(TEXT_DOMAIN, "operation is not "
"in progress"));
case EZFS_CHECKPOINT_EXISTS:
return (dgettext(TEXT_DOMAIN, "checkpoint exists"));
case EZFS_DISCARDING_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "currently discarding "
"checkpoint"));
case EZFS_NO_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "checkpoint does not exist"));
case EZFS_DEVRM_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "device removal in progress"));
case EZFS_VDEV_TOO_BIG:
return (dgettext(TEXT_DOMAIN, "device exceeds supported size"));
case EZFS_ACTIVE_POOL:
return (dgettext(TEXT_DOMAIN, "pool is imported on a "
"different host"));
case EZFS_CRYPTOFAILED:
return (dgettext(TEXT_DOMAIN, "encryption failure"));
case EZFS_TOOMANY:
return (dgettext(TEXT_DOMAIN, "argument list too long"));
case EZFS_INITIALIZING:
return (dgettext(TEXT_DOMAIN, "currently initializing"));
case EZFS_NO_INITIALIZE:
return (dgettext(TEXT_DOMAIN, "there is no active "
"initialization"));
case EZFS_WRONG_PARENT:
return (dgettext(TEXT_DOMAIN, "invalid parent dataset"));
case EZFS_TRIMMING:
return (dgettext(TEXT_DOMAIN, "currently trimming"));
case EZFS_NO_TRIM:
return (dgettext(TEXT_DOMAIN, "there is no active trim"));
case EZFS_TRIM_NOTSUP:
return (dgettext(TEXT_DOMAIN, "trim operations are not "
"supported by this device"));
case EZFS_NO_RESILVER_DEFER:
return (dgettext(TEXT_DOMAIN, "this action requires the "
"resilver_defer feature"));
case EZFS_EXPORT_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "pool export in progress"));
case EZFS_REBUILDING:
return (dgettext(TEXT_DOMAIN, "currently sequentially "
"resilvering"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
assert(hdl->libzfs_error == 0);
return (dgettext(TEXT_DOMAIN, "no error"));
}
}
/*PRINTFLIKE2*/
void
zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
fmt, ap);
hdl->libzfs_desc_active = 1;
va_end(ap);
}
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
fmt, ap);
hdl->libzfs_error = error;
if (hdl->libzfs_desc_active)
hdl->libzfs_desc_active = 0;
else
hdl->libzfs_desc[0] = '\0';
if (hdl->libzfs_printerr) {
if (error == EZFS_UNKNOWN) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
"error: %s: %s\n"), hdl->libzfs_action,
libzfs_error_description(hdl));
abort();
}
(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
libzfs_error_description(hdl));
if (error == EZFS_NOMEM)
exit(1);
}
}
int
zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_error_fmt(hdl, error, "%s", msg));
}
/*PRINTFLIKE3*/
int
zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zfs_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
va_list ap)
{
switch (error) {
case EPERM:
case EACCES:
zfs_verror(hdl, EZFS_PERM, fmt, ap);
return (-1);
case ECANCELED:
zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
return (-1);
case EIO:
zfs_verror(hdl, EZFS_IO, fmt, ap);
return (-1);
case EFAULT:
zfs_verror(hdl, EZFS_FAULT, fmt, ap);
return (-1);
case EINTR:
zfs_verror(hdl, EZFS_INTR, fmt, ap);
return (-1);
}
return (0);
}
int
zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_standard_error_fmt(hdl, error, "%s", msg));
}
/*PRINTFLIKE3*/
int
zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENXIO:
case ENODEV:
case EPIPE:
zfs_verror(hdl, EZFS_IO, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset does not exist"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
break;
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE:
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_WRONG_PARENT:
zfs_verror(hdl, EZFS_WRONG_PARENT, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
break;
}
va_end(ap);
return (-1);
}
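/*
 * Illustrative sketch (not part of the library): the usual two-step report
 * used throughout libzfs, an optional zfs_error_aux() detail followed by a
 * zfs_*error() call that maps the errno.  The helper name and message are
 * hypothetical.
 */
#if 0
static int
example_report_error(libzfs_handle_t *hdl, const char *dsname, int ioctl_errno)
{
	char errbuf[1024];

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot frob '%s'"), dsname);
	/* the detail line is consumed by the next zfs_*error() call */
	zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "example detail"));
	/* maps common errnos (EPERM, ENOENT, EEXIST, ...) onto EZFS_* codes */
	return (zfs_standard_error(hdl, ioctl_errno, errbuf));
}
#endif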
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
char *errbuf)
{
switch (err) {
case ENOSPC:
/*
* For quotas and reservations, ENOSPC indicates
* something different; setting a quota or reservation
* doesn't use any disk space.
*/
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, err, errbuf);
break;
}
break;
case EBUSY:
(void) zfs_standard_error(hdl, EBUSY, errbuf);
break;
case EROFS:
(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value too long"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool and or dataset must be upgraded to set this "
"property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ERANGE:
if (prop == ZFS_PROP_COMPRESSION ||
prop == ZFS_PROP_DNODESIZE ||
prop == ZFS_PROP_RECORDSIZE) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"bootable datasets"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else if (prop == ZFS_PROP_CHECKSUM ||
prop == ZFS_PROP_DEDUP) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"root pools"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EINVAL:
if (prop == ZPROP_INVAL) {
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case ZFS_ERR_BADPROP:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case EACCES:
if (prop == ZFS_PROP_KEYLOCATION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation may only be set on encryption roots"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
#ifdef _ILP32
if (prop == ZFS_PROP_VOLSIZE) {
(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
break;
}
#endif
fallthrough;
default:
(void) zfs_standard_error(hdl, err, errbuf);
}
}
int
zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zpool_standard_error_fmt(hdl, error, "%s", msg));
}
/*PRINTFLIKE3*/
int
zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENODEV:
zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "no such pool or dataset"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
/* There is no pending operation to cancel */
case ENOTACTIVE:
zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap);
break;
case ENXIO:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is currently unavailable"));
zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
break;
case EINVAL:
zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
return (-1);
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"block size out of range or does not match"));
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_CHECKPOINT_EXISTS:
zfs_verror(hdl, EZFS_CHECKPOINT_EXISTS, fmt, ap);
break;
case ZFS_ERR_DISCARDING_CHECKPOINT:
zfs_verror(hdl, EZFS_DISCARDING_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_NO_CHECKPOINT:
zfs_verror(hdl, EZFS_NO_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_DEVRM_IN_PROGRESS:
zfs_verror(hdl, EZFS_DEVRM_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_VDEV_TOO_BIG:
zfs_verror(hdl, EZFS_VDEV_TOO_BIG, fmt, ap);
break;
case ZFS_ERR_EXPORT_IN_PROGRESS:
zfs_verror(hdl, EZFS_EXPORT_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_RESILVER_IN_PROGRESS:
zfs_verror(hdl, EZFS_RESILVERING, fmt, ap);
break;
case ZFS_ERR_REBUILD_IN_PROGRESS:
zfs_verror(hdl, EZFS_REBUILDING, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
}
va_end(ap);
return (-1);
}
/*
* Display an out of memory error message and abort the current program.
*/
int
no_memory(libzfs_handle_t *hdl)
{
return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
}
/*
* A safe form of malloc() which will die if the allocation fails.
*/
void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) no_memory(hdl);
return (data);
}
/*
* A safe form of asprintf() which will die if the allocation fails.
*/
/*PRINTFLIKE2*/
char *
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
char *ret;
int err;
va_start(ap, fmt);
err = vasprintf(&ret, fmt, ap);
va_end(ap);
if (err < 0) {
(void) no_memory(hdl);
ret = NULL;
}
return (ret);
}
/*
* A safe form of realloc(), which also zeroes newly allocated space.
*/
void *
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
{
void *ret;
if ((ret = realloc(ptr, newsize)) == NULL) {
(void) no_memory(hdl);
return (NULL);
}
bzero((char *)ret + oldsize, (newsize - oldsize));
return (ret);
}
/*
* A safe form of strdup() which will die if the allocation fails.
*/
char *
zfs_strdup(libzfs_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) no_memory(hdl);
return (ret);
}
void
libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
{
hdl->libzfs_printerr = printerr;
}
/*
* Read lines from an open file descriptor and store them in an array of
* strings until EOF. lines[] will be allocated and populated with all the
* lines read. All newlines are replaced with NULL terminators for
* convenience. lines[] must be freed after use with libzfs_free_str_array().
*
* Returns the number of lines read.
*/
static int
libzfs_read_stdout_from_fd(int fd, char **lines[])
{
FILE *fp;
int lines_cnt = 0;
size_t len = 0;
char *line = NULL;
char **tmp_lines = NULL, **tmp;
char *nl = NULL;
int rc;
fp = fdopen(fd, "r");
if (fp == NULL)
return (0);
while (1) {
rc = getline(&line, &len, fp);
if (rc == -1)
break;
tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1));
if (tmp == NULL) {
/* Return the lines we were able to process */
break;
}
tmp_lines = tmp;
/* Terminate newlines */
if ((nl = strchr(line, '\n')) != NULL)
*nl = '\0';
tmp_lines[lines_cnt] = line;
lines_cnt++;
line = NULL;
}
fclose(fp);
*lines = tmp_lines;
return (lines_cnt);
}
static int
libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags,
char **lines[], int *lines_cnt)
{
pid_t pid;
int error, devnull_fd;
int link[2];
/*
* Setup a pipe between our child and parent process if we're
* reading stdout.
*/
if ((lines != NULL) && pipe2(link, O_CLOEXEC) == -1)
return (-EPIPE);
pid = vfork();
if (pid == 0) {
/* Child process */
devnull_fd = open("/dev/null", O_WRONLY | O_CLOEXEC);
if (devnull_fd < 0)
_exit(-1);
if (!(flags & STDOUT_VERBOSE) && (lines == NULL))
(void) dup2(devnull_fd, STDOUT_FILENO);
else if (lines != NULL) {
/* Save the output to lines[] */
dup2(link[1], STDOUT_FILENO);
}
if (!(flags & STDERR_VERBOSE))
(void) dup2(devnull_fd, STDERR_FILENO);
if (flags & NO_DEFAULT_PATH) {
if (env == NULL)
execv(path, argv);
else
execve(path, argv, env);
} else {
if (env == NULL)
execvp(path, argv);
else
execvpe(path, argv, env);
}
_exit(-1);
} else if (pid > 0) {
/* Parent process */
int status;
while ((error = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR) { }
if (error < 0 || !WIFEXITED(status))
return (-1);
if (lines != NULL) {
close(link[1]);
*lines_cnt = libzfs_read_stdout_from_fd(link[0], lines);
}
return (WEXITSTATUS(status));
}
return (-1);
}
int
libzfs_run_process(const char *path, char *argv[], int flags)
{
return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL));
}
/*
* Run a command and store its stdout lines in an array of strings (lines[]).
* lines[] is allocated and populated for you, and the number of lines is set in
* lines_cnt. lines[] must be freed after use with libzfs_free_str_array().
* All newlines (\n) in lines[] are replaced with NUL terminators for convenience.
*/
int
libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt));
}
/*
* Same as libzfs_run_process_get_stdout(), but run without $PATH set. This
* means that *path needs to be the full path to the executable.
*/
int
libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
char *env[], char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH,
lines, lines_cnt));
}
/*
* Free an array of strings. Free both the strings contained in the array and
* the array itself.
*/
void
libzfs_free_str_array(char **strs, int count)
{
while (--count >= 0)
free(strs[count]);
free(strs);
}
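/*
 * Illustrative sketch (not part of the library): capturing the stdout of an
 * external command with the helpers above.  The helper name and command are
 * hypothetical.
 */
#if 0
static void
example_capture_stdout(void)
{
	char *argv[] = { "uname", "-r", NULL };
	char **lines = NULL;
	int lines_cnt = 0, i;

	if (libzfs_run_process_get_stdout("uname", argv, NULL,
	    &lines, &lines_cnt) == 0) {
		for (i = 0; i < lines_cnt; i++)
			(void) printf("%s\n", lines[i]);
	}
	libzfs_free_str_array(lines, lines_cnt);
}
#endif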
/*
* Returns 1 if the environment variable is set to "YES", "yes", "ON", "on", or
* a non-zero number.
*
* Returns 0 otherwise.
*/
int
libzfs_envvar_is_set(char *envvar)
{
char *env = getenv(envvar);
if (env && (strtoul(env, NULL, 0) > 0 ||
(!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) ||
(!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2)))
return (1);
return (0);
}
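/*
 * Illustrative sketch (not part of the library): gating optional behavior on
 * an environment switch.  The variable name is hypothetical.
 */
#if 0
static void
example_envvar(void)
{
	if (libzfs_envvar_is_set("ZFS_EXAMPLE_DEBUG"))	/* "YES", "on", "1", ... */
		(void) fprintf(stderr, "debug output enabled\n");
}
#endif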
libzfs_handle_t *
libzfs_init(void)
{
libzfs_handle_t *hdl;
int error;
char *env;
if ((error = libzfs_load_module()) != 0) {
errno = error;
return (NULL);
}
if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
return (NULL);
}
if (regcomp(&hdl->libzfs_urire, URI_REGEX, 0) != 0) {
free(hdl);
return (NULL);
}
if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR|O_EXCL|O_CLOEXEC)) < 0) {
free(hdl);
return (NULL);
}
#ifdef HAVE_SETMNTENT
if ((hdl->libzfs_mnttab = setmntent(MNTTAB, "re")) == NULL) {
#else
if ((hdl->libzfs_mnttab = fopen(MNTTAB, "re")) == NULL) {
#endif
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
if (libzfs_core_init() != 0) {
(void) close(hdl->libzfs_fd);
(void) fclose(hdl->libzfs_mnttab);
free(hdl);
return (NULL);
}
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
libzfs_mnttab_init(hdl);
fletcher_4_init();
if (getenv("ZFS_PROP_DEBUG") != NULL) {
hdl->libzfs_prop_debug = B_TRUE;
}
if ((env = getenv("ZFS_SENDRECV_MAX_NVLIST")) != NULL) {
if ((error = zfs_nicestrtonum(hdl, env,
&hdl->libzfs_max_nvlist))) {
errno = error;
(void) close(hdl->libzfs_fd);
(void) fclose(hdl->libzfs_mnttab);
free(hdl);
return (NULL);
}
} else {
hdl->libzfs_max_nvlist = (SPA_MAXBLOCKSIZE * 4);
}
/*
* For testing, remove some settable properties and features
*/
if (libzfs_envvar_is_set("ZFS_SYSFS_PROP_SUPPORT_TEST")) {
zprop_desc_t *proptbl;
proptbl = zpool_prop_get_table();
proptbl[ZPOOL_PROP_COMMENT].pd_zfs_mod_supported = B_FALSE;
proptbl = zfs_prop_get_table();
proptbl[ZFS_PROP_DNODESIZE].pd_zfs_mod_supported = B_FALSE;
zfeature_info_t *ftbl = spa_feature_table;
ftbl[SPA_FEATURE_LARGE_BLOCKS].fi_zfs_mod_supported = B_FALSE;
}
return (hdl);
}
void
libzfs_fini(libzfs_handle_t *hdl)
{
(void) close(hdl->libzfs_fd);
if (hdl->libzfs_mnttab)
#ifdef HAVE_SETMNTENT
(void) endmntent(hdl->libzfs_mnttab);
#else
(void) fclose(hdl->libzfs_mnttab);
#endif
zpool_free_handles(hdl);
namespace_clear(hdl);
libzfs_mnttab_fini(hdl);
libzfs_core_fini();
regfree(&hdl->libzfs_urire);
fletcher_4_fini();
+#if LIBFETCH_DYNAMIC
+ if (hdl->libfetch != (void *)-1 && hdl->libfetch != NULL)
+ (void) dlclose(hdl->libfetch);
+ free(hdl->libfetch_load_error);
+#endif
free(hdl);
}
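/*
 * Illustrative sketch (not part of the library): the typical open/use/close
 * pattern for a libzfs consumer.  The helper name is hypothetical.
 */
#if 0
static int
example_with_handle(void)
{
	libzfs_handle_t *hdl = libzfs_init();

	if (hdl == NULL)
		return (-1);	/* errno may hold the module-load error */
	libzfs_print_on_error(hdl, B_TRUE);
	/* ... zfs_open(), zpool_open(), etc. ... */
	libzfs_fini(hdl);
	return (0);
}
#endif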
libzfs_handle_t *
zpool_get_handle(zpool_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
libzfs_handle_t *
zfs_get_handle(zfs_handle_t *zhp)
{
return (zhp->zfs_hdl);
}
zpool_handle_t *
zfs_get_pool_handle(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
/*
* Given a name, determine whether or not it's a valid path
* (starts with '/' or "./"). If so, walk the mnttab trying
* to match the device number. If not, treat the path as an
* fs/vol/snap/bkmark name.
*/
zfs_handle_t *
zfs_path_to_zhandle(libzfs_handle_t *hdl, const char *path, zfs_type_t argtype)
{
struct stat64 statbuf;
struct extmnttab entry;
if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
/*
* It's not a valid path, assume it's a name of type 'argtype'.
*/
return (zfs_open(hdl, path, argtype));
}
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "re", hdl->libzfs_mnttab) == NULL)
return (NULL);
if (getextmntent(path, &entry, &statbuf) != 0)
return (NULL);
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
path);
return (NULL);
}
return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
}
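/*
 * Illustrative sketch (not part of the library): both a mounted path and a
 * dataset name resolve through zfs_path_to_zhandle().  The helper and the
 * names below are hypothetical.
 */
#if 0
static void
example_path_or_name(libzfs_handle_t *hdl)
{
	/* a mounted path: resolved via the mnttab device match */
	zfs_handle_t *by_path = zfs_path_to_zhandle(hdl, "/tank/home",
	    ZFS_TYPE_FILESYSTEM);
	/* a dataset name: passed straight to zfs_open() */
	zfs_handle_t *by_name = zfs_path_to_zhandle(hdl, "tank/home",
	    ZFS_TYPE_FILESYSTEM);

	if (by_path != NULL)
		zfs_close(by_path);
	if (by_name != NULL)
		zfs_close(by_name);
}
#endif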
/*
* Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
* an ioctl().
*/
int
zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
{
if (len == 0)
len = 256 * 1024;
zc->zc_nvlist_dst_size = len;
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
* expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
* filled in by the kernel to indicate the actual required size.
*/
int
zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called to free the src and dst nvlists stored in the command structure.
*/
void
zcmd_free_nvlists(zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_conf);
free((void *)(uintptr_t)zc->zc_nvlist_src);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_conf = 0;
zc->zc_nvlist_src = 0;
zc->zc_nvlist_dst = 0;
}
static int
zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
nvlist_t *nvl)
{
char *packed;
size_t len;
verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
if ((packed = zfs_alloc(hdl, len)) == NULL)
return (-1);
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
*outnv = (uint64_t)(uintptr_t)packed;
*outlen = len;
return (0);
}
int
zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
&zc->zc_nvlist_conf_size, nvl));
}
int
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
&zc->zc_nvlist_src_size, nvl));
}
/*
* Unpacks an nvlist from the ZFS ioctl command structure.
*/
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size, nvlp, 0) != 0)
return (no_memory(hdl));
return (0);
}
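/*
 * Illustrative sketch (not part of the library): the grow-and-retry pattern
 * the helpers above support for nvlist-returning ioctls.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_ioctl_get_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, int ioc,
    nvlist_t **nvp)
{
	if (zcmd_alloc_dst_nvlist(hdl, zc, 0) != 0)
		return (-1);
	while (zfs_ioctl(hdl, ioc, zc) != 0) {
		if (errno != ENOMEM || zcmd_expand_dst_nvlist(hdl, zc) != 0) {
			zcmd_free_nvlists(zc);
			return (-1);
		}
		/* kernel filled in zc_nvlist_dst_size; buffer regrown, retry */
	}
	if (zcmd_read_dst_nvlist(hdl, zc, nvp) != 0) {
		zcmd_free_nvlists(zc);
		return (-1);
	}
	zcmd_free_nvlists(zc);
	return (0);
}
#endif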
/*
* ================================================================
* API shared by zfs and zpool property management
* ================================================================
*/
static void
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
zprop_list_t *pl = cbp->cb_proplist;
int i;
char *title;
size_t len;
cbp->cb_first = B_FALSE;
if (cbp->cb_scripted)
return;
/*
* Start with the length of the column headers.
*/
cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
"PROPERTY"));
cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
"VALUE"));
cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
"RECEIVED"));
cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
"SOURCE"));
/* first property is always NAME */
assert(cbp->cb_proplist->pl_prop ==
((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME : ZFS_PROP_NAME));
/*
* Go through and calculate the widths for each column. For the
* 'source' column, we kludge it up by taking the worst-case scenario of
* inheriting from the longest name. This is acceptable because in the
* majority of cases 'SOURCE' is the last column displayed, and we don't
* use the width anyway. Note that the 'VALUE' column can be oversized,
* if the name of the property is much longer than any values we find.
*/
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* 'PROPERTY' column
*/
if (pl->pl_prop != ZPROP_INVAL) {
const char *propname = (type == ZFS_TYPE_POOL) ?
zpool_prop_to_name(pl->pl_prop) :
zfs_prop_to_name(pl->pl_prop);
len = strlen(propname);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
} else {
len = strlen(pl->pl_user_prop);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
}
/*
* 'VALUE' column. The first property is always the 'name'
* property that was tacked on either by /sbin/zfs's
* zfs_do_get() or when calling zprop_expand_list(), so we
* ignore its width. If the user specified the name property
* to display, then it will be later in the list in any case.
*/
if (pl != cbp->cb_proplist &&
pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
/* 'RECEIVED' column. */
if (pl != cbp->cb_proplist &&
pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
/*
* 'NAME' and 'SOURCE' columns
*/
if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME) &&
pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
strlen(dgettext(TEXT_DOMAIN, "inherited from"));
}
}
/*
* Now go through and print the headers.
*/
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
title = dgettext(TEXT_DOMAIN, "NAME");
break;
case GET_COL_PROPERTY:
title = dgettext(TEXT_DOMAIN, "PROPERTY");
break;
case GET_COL_VALUE:
title = dgettext(TEXT_DOMAIN, "VALUE");
break;
case GET_COL_RECVD:
title = dgettext(TEXT_DOMAIN, "RECEIVED");
break;
case GET_COL_SOURCE:
title = dgettext(TEXT_DOMAIN, "SOURCE");
break;
default:
title = NULL;
}
if (title != NULL) {
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", title);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
title);
}
}
(void) printf("\n");
}
/*
* Display a single line of output, according to the settings in the callback
* structure.
*/
void
zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
const char *propname, const char *value, zprop_source_t sourcetype,
const char *source, const char *recvd_value)
{
int i;
const char *str = NULL;
char buf[128];
/*
* Ignore those source types that the user has chosen to ignore.
*/
if ((sourcetype & cbp->cb_sources) == 0)
return;
if (cbp->cb_first)
zprop_print_headers(cbp, cbp->cb_type);
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
str = name;
break;
case GET_COL_PROPERTY:
str = propname;
break;
case GET_COL_VALUE:
str = value;
break;
case GET_COL_SOURCE:
switch (sourcetype) {
case ZPROP_SRC_NONE:
str = "-";
break;
case ZPROP_SRC_DEFAULT:
str = "default";
break;
case ZPROP_SRC_LOCAL:
str = "local";
break;
case ZPROP_SRC_TEMPORARY:
str = "temporary";
break;
case ZPROP_SRC_INHERITED:
(void) snprintf(buf, sizeof (buf),
"inherited from %s", source);
str = buf;
break;
case ZPROP_SRC_RECEIVED:
str = "received";
break;
default:
str = NULL;
assert(!"unhandled zprop_source_t");
}
break;
case GET_COL_RECVD:
str = (recvd_value == NULL ? "-" : recvd_value);
break;
default:
continue;
}
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", str);
else if (cbp->cb_scripted)
(void) printf("%s\t", str);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
str);
}
(void) printf("\n");
}
/*
* Given a numeric suffix, convert the value into a number of bits that the
* resulting value must be shifted.
*/
static int
str2shift(libzfs_handle_t *hdl, const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Allow 'G' = 'GB' = 'GiB', case-insensitively.
* However, 'BB' and 'BiB' are disallowed.
*/
if (buf[1] == '\0' ||
(toupper(buf[0]) != 'B' &&
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
buf[3] == '\0'))))
return (10 * i);
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Convert a string of the form '100G' into a real number. Used when setting
* properties or creating a volume. 'buf' is used to place an extended error
* message for the caller to use.
*/
int
zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
{
char *end;
int shift;
*num = 0;
/* Check to see if this looks like a number. */
if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad numeric value '%s'"), value);
return (-1);
}
/* Rely on strtoull() to process the numeric portion. */
errno = 0;
*num = strtoull(value, &end, 10);
/*
* Check for ERANGE, which indicates that the value is too large to fit
* in a 64-bit value.
*/
if (errno == ERANGE) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
/*
* If we have a decimal value, then do the computation with floating
* point arithmetic. Otherwise, use standard arithmetic.
*/
if (*end == '.') {
double fval = strtod(value, &end);
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
fval *= pow(2, shift);
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num = (uint64_t)fval;
} else {
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
/* Check for overflow */
if (shift >= 64 || (*num << shift) >> shift != *num) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num <<= shift;
}
return (0);
}
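/*
 * Illustrative sketch, not part of the original change: how a caller
 * might use zfs_nicestrtonum() to parse a human-readable size.  The
 * name example_parse_size() is hypothetical; the handle may be NULL,
 * in which case no extended error text is recorded.
 */
static int
example_parse_size(libzfs_handle_t *hdl)
{
	uint64_t bytes;

	/* "100G": str2shift() maps 'G' to 30, so this yields 100 << 30. */
	if (zfs_nicestrtonum(hdl, "100G", &bytes) != 0)
		return (-1);	/* bad suffix, bad digits, or overflow */

	(void) printf("%llu\n", (unsigned long long)bytes);
	return (0);
}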
/*
* Given a propname=value nvpair to set, parse any numeric properties
* (index, boolean, etc) if they are specified as strings and add the
* resulting nvpair to the returned nvlist.
*
* At the DSL layer, all properties are either 64-bit numbers or strings.
* We want the user to be able to ignore this fact and specify properties
* as native values (numbers, for example) or as strings (to simplify
* command line utilities). This also handles converting index types
* (compression, checksum, etc) from strings to their on-disk index.
*/
int
zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
const char *errbuf)
{
data_type_t datatype = nvpair_type(elem);
zprop_type_t proptype;
const char *propname;
char *value;
boolean_t isnone = B_FALSE;
boolean_t isauto = B_FALSE;
int err = 0;
if (type == ZFS_TYPE_POOL) {
proptype = zpool_prop_get_type(prop);
propname = zpool_prop_to_name(prop);
} else {
proptype = zfs_prop_get_type(prop);
propname = zfs_prop_to_name(prop);
}
/*
* Convert any properties to the internal DSL value types.
*/
*svalp = NULL;
*ivalp = 0;
switch (proptype) {
case PROP_TYPE_STRING:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
err = nvpair_value_string(elem, svalp);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is invalid"), nvpair_name(elem));
goto error;
}
if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is too long"), nvpair_name(elem));
goto error;
}
break;
case PROP_TYPE_NUMBER:
if (datatype == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &value);
if (strcmp(value, "none") == 0) {
isnone = B_TRUE;
} else if (strcmp(value, "auto") == 0) {
isauto = B_TRUE;
} else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) {
goto error;
}
} else if (datatype == DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, ivalp);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), nvpair_name(elem));
goto error;
}
/*
* Quota special: force 'none' and don't allow 0.
*/
if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
(prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable quota/refquota"));
goto error;
}
/*
* Special handling for "*_limit=none". In this case it's not
* 0 but UINT64_MAX.
*/
if ((type & ZFS_TYPE_DATASET) && isnone &&
(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT)) {
*ivalp = UINT64_MAX;
}
/*
* Special handling for setting 'refreservation' to 'auto'. Use
* UINT64_MAX to tell the caller to use zfs_fix_auto_resv().
* 'auto' is only allowed on volumes.
*/
if (isauto) {
switch (prop) {
case ZFS_PROP_REFRESERVATION:
if ((type & ZFS_TYPE_VOLUME) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s=auto' only allowed on "
"volumes"), nvpair_name(elem));
goto error;
}
*ivalp = UINT64_MAX;
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'auto' is invalid value for '%s'"),
nvpair_name(elem));
goto error;
}
}
break;
case PROP_TYPE_INDEX:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
(void) nvpair_value_string(elem, &value);
if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be one of '%s'"), propname,
zprop_values(prop, type));
goto error;
}
break;
default:
abort();
}
/*
* Add the result to our return set of properties.
*/
if (*svalp != NULL) {
if (nvlist_add_string(ret, propname, *svalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
} else {
if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
}
return (0);
error:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
return (-1);
}
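/*
 * Illustrative sketch, not part of the original change: feeding a single
 * "compression=on" pair through zprop_parse_value() so the index value
 * is resolved to its on-disk number.  example_parse_one_prop() is a
 * hypothetical helper; "out" is an nvlist allocated by the caller.
 */
static int
example_parse_one_prop(libzfs_handle_t *hdl, nvlist_t *out)
{
	nvlist_t *in = NULL;
	char *sval;
	uint64_t ival;
	int err = -1;

	if (nvlist_alloc(&in, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(in, "compression", "on") != 0)
		goto done;

	/* PROP_TYPE_INDEX: "on" is converted to its numeric index. */
	err = zprop_parse_value(hdl, nvlist_next_nvpair(in, NULL),
	    ZFS_PROP_COMPRESSION, ZFS_TYPE_FILESYSTEM, out, &sval, &ival,
	    dgettext(TEXT_DOMAIN, "cannot set property"));
done:
	nvlist_free(in);
	return (err);
}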
static int
addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp,
zfs_type_t type)
{
int prop;
zprop_list_t *entry;
prop = zprop_name_to_prop(propname, type);
if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE))
prop = ZPROP_INVAL;
/*
* When no property table entry can be found, return failure if
* this is a pool property or if this isn't a user-defined
* dataset property.
*/
if (prop == ZPROP_INVAL && ((type == ZFS_TYPE_POOL &&
!zpool_prop_feature(propname) &&
!zpool_prop_unsupported(propname)) ||
(type == ZFS_TYPE_DATASET && !zfs_prop_user(propname) &&
!zfs_prop_userquota(propname) && !zfs_prop_written(propname)))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = prop;
if (prop == ZPROP_INVAL) {
if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) ==
NULL) {
free(entry);
return (-1);
}
entry->pl_width = strlen(propname);
} else {
entry->pl_width = zprop_width(prop, &entry->pl_fixed,
type);
}
*listp = entry;
return (0);
}
/*
* Given a comma-separated list of properties, construct a property list
* containing both user-defined and native properties. This function will
* return a NULL list if 'all' is specified, which can later be expanded
* by zprop_expand_list().
*/
int
zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
zfs_type_t type)
{
*listp = NULL;
/*
* If 'all' is specified, return a NULL list.
*/
if (strcmp(props, "all") == 0)
return (0);
/*
* If no props were specified, return an error.
*/
if (props[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no properties specified"));
return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
"bad property list")));
}
/*
* It would be nice to use getsubopt() here, but the inclusion of column
* aliases makes this more effort than it's worth.
*/
while (*props != '\0') {
size_t len;
char *p;
char c;
if ((p = strchr(props, ',')) == NULL) {
len = strlen(props);
p = props + len;
} else {
len = p - props;
}
/*
* Check for empty options.
*/
if (len == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty property name"));
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
/*
* Check all regular property names.
*/
c = props[len];
props[len] = '\0';
if (strcmp(props, "space") == 0) {
static char *spaceprops[] = {
"name", "avail", "used", "usedbysnapshots",
"usedbydataset", "usedbyrefreservation",
"usedbychildren", NULL
};
int i;
for (i = 0; spaceprops[i]; i++) {
if (addlist(hdl, spaceprops[i], listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
} else {
if (addlist(hdl, props, listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
props = p;
if (c == ',')
props++;
}
return (0);
}
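/*
 * Illustrative sketch, not part of the original change: building and
 * releasing a column list the way "zfs get"/"zfs list" style callers do.
 * example_build_prop_list() is a hypothetical helper; the column string
 * must be writable because zprop_get_list() tokenizes it in place.
 */
static int
example_build_prop_list(libzfs_handle_t *hdl)
{
	char cols[] = "name,used,avail,compression";
	zprop_list_t *pl = NULL;

	if (zprop_get_list(hdl, cols, &pl, ZFS_TYPE_DATASET) != 0)
		return (-1);	/* an entry was not a valid property */

	/* ... walk the pl->pl_prop / pl->pl_user_prop entries here ... */
	zprop_free_list(pl);
	return (0);
}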
void
zprop_free_list(zprop_list_t *pl)
{
zprop_list_t *next;
while (pl != NULL) {
next = pl->pl_next;
free(pl->pl_user_prop);
free(pl);
pl = next;
}
}
typedef struct expand_data {
zprop_list_t **last;
libzfs_handle_t *hdl;
zfs_type_t type;
} expand_data_t;
static int
zprop_expand_list_cb(int prop, void *cb)
{
zprop_list_t *entry;
expand_data_t *edp = cb;
if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL)
return (ZPROP_INVAL);
entry->pl_prop = prop;
entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
entry->pl_all = B_TRUE;
*(edp->last) = entry;
edp->last = &entry->pl_next;
return (ZPROP_CONT);
}
int
zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
{
zprop_list_t *entry;
zprop_list_t **last;
expand_data_t exp;
if (*plp == NULL) {
/*
* If this is the very first time we've been called for an 'all'
* specification, expand the list to include all native
* properties.
*/
last = plp;
exp.last = last;
exp.hdl = hdl;
exp.type = type;
if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
B_FALSE, type) == ZPROP_INVAL)
return (-1);
/*
* Add 'name' to the beginning of the list, which is handled
* specially.
*/
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = (type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME;
entry->pl_width = zprop_width(entry->pl_prop,
&entry->pl_fixed, type);
entry->pl_all = B_TRUE;
entry->pl_next = *plp;
*plp = entry;
}
return (0);
}
int
zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
zfs_type_t type)
{
return (zprop_iter_common(func, cb, show_all, ordered, type));
}
/*
* Fill given version buffer with zfs userland version
*/
void
zfs_version_userland(char *version, int len)
{
(void) strlcpy(version, ZFS_META_ALIAS, len);
}
/*
* Prints both zfs userland and kernel versions
* Returns 0 on success, and -1 on error (with errno set)
*/
int
zfs_version_print(void)
{
char zver_userland[128];
char zver_kernel[128];
zfs_version_userland(zver_userland, sizeof (zver_userland));
(void) printf("%s\n", zver_userland);
if (zfs_version_kernel(zver_kernel, sizeof (zver_kernel)) == -1) {
fprintf(stderr, "zfs_version_kernel() failed: %s\n",
strerror(errno));
return (-1);
}
(void) printf("zfs-kmod-%s\n", zver_kernel);
return (0);
}
/*
* Return 1 if the user requested ANSI color output, and our terminal supports
* it. Return 0 for no color.
*/
static int
use_color(void)
{
static int use_color = -1;
char *term;
/*
* Optimization:
*
* For each zpool invocation, we do a single check to see if we should
* be using color or not, and cache that value for the lifetime of the
* zpool command. That makes it cheap to call use_color() when
* we're printing with color. We assume that the settings are not going
* to change during the invocation of a zpool command (the user isn't
* going to change the ZFS_COLOR value while zpool is running, for
* example).
*/
if (use_color != -1) {
/*
* We've already figured out if we should be using color or
* not. Return the cached value.
*/
return (use_color);
}
term = getenv("TERM");
/*
* The user can set the ZFS_COLOR env var to enable zpool ANSI color
* output. However, if NO_COLOR is set (https://no-color.org/) then
* don't use it. Also, don't use color if the terminal doesn't support
* it.
*/
if (libzfs_envvar_is_set("ZFS_COLOR") &&
!libzfs_envvar_is_set("NO_COLOR") &&
isatty(STDOUT_FILENO) && term && strcmp("dumb", term) != 0 &&
strcmp("unknown", term) != 0) {
/* Color supported */
use_color = 1;
} else {
use_color = 0;
}
return (use_color);
}
/*
* color_start() and color_end() are used for when you want to colorize a block
* of text. For example:
*
* color_start(ANSI_RED_FG)
* printf("hello");
* printf("world");
* color_end();
*/
void
color_start(char *color)
{
if (use_color())
printf("%s", color);
}
void
color_end(void)
{
if (use_color())
printf(ANSI_RESET);
}
/* printf() with a color. If color is NULL, then do a normal printf. */
int
printf_color(char *color, char *format, ...)
{
va_list aptr;
int rc;
if (color)
color_start(color);
va_start(aptr, format);
rc = vprintf(format, aptr);
va_end(aptr);
if (color)
color_end();
return (rc);
}
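/*
 * Illustrative sketch, not part of the original change: emitting a
 * colorized message through the helpers above.  ANSI_RED_FG is the
 * macro named in the comment before color_start(); if the headers in a
 * given tree spell that color macro differently, substitute it.
 */
static void
example_print_warning(const char *msg)
{
	/* Degrades to plain output when use_color() returns 0. */
	(void) printf_color(ANSI_RED_FG, "warning: %s\n", msg);
}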
diff --git a/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c b/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c
index 21d64053862e..b800e069e707 100644
--- a/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c
+++ b/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c
@@ -1,413 +1,413 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017 RackTop Systems.
* Copyright (c) 2018 Datto Inc.
* Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include "libzfs_impl.h"
#include <thread_pool.h>
#define ZS_COMMENT 0x00000000 /* comment */
#define ZS_ZFSUTIL 0x00000001 /* caller is zfs(8) */
typedef struct option_map {
const char *name;
unsigned long mntmask;
unsigned long zfsmask;
} option_map_t;
static const option_map_t option_map[] = {
/* Canonicalized filesystem independent options from mount(8) */
{ MNTOPT_NOAUTO, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_DEFAULTS, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NODEVICES, MS_NODEV, ZS_COMMENT },
{ MNTOPT_DEVICES, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_DIRSYNC, MS_DIRSYNC, ZS_COMMENT },
{ MNTOPT_NOEXEC, MS_NOEXEC, ZS_COMMENT },
{ MNTOPT_EXEC, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_GROUP, MS_GROUP, ZS_COMMENT },
{ MNTOPT_NETDEV, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NOFAIL, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NOSUID, MS_NOSUID, ZS_COMMENT },
{ MNTOPT_SUID, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_OWNER, MS_OWNER, ZS_COMMENT },
{ MNTOPT_REMOUNT, MS_REMOUNT, ZS_COMMENT },
{ MNTOPT_RO, MS_RDONLY, ZS_COMMENT },
{ MNTOPT_RW, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_SYNC, MS_SYNCHRONOUS, ZS_COMMENT },
{ MNTOPT_USER, MS_USERS, ZS_COMMENT },
{ MNTOPT_USERS, MS_USERS, ZS_COMMENT },
/* acl flags passed with util-linux-2.24 mount command */
{ MNTOPT_ACL, MS_POSIXACL, ZS_COMMENT },
{ MNTOPT_NOACL, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_POSIXACL, MS_POSIXACL, ZS_COMMENT },
#ifdef MS_NOATIME
{ MNTOPT_NOATIME, MS_NOATIME, ZS_COMMENT },
{ MNTOPT_ATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_NODIRATIME
{ MNTOPT_NODIRATIME, MS_NODIRATIME, ZS_COMMENT },
{ MNTOPT_DIRATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_RELATIME
{ MNTOPT_RELATIME, MS_RELATIME, ZS_COMMENT },
{ MNTOPT_NORELATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_STRICTATIME
{ MNTOPT_STRICTATIME, MS_STRICTATIME, ZS_COMMENT },
{ MNTOPT_NOSTRICTATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_LAZYTIME
{ MNTOPT_LAZYTIME, MS_LAZYTIME, ZS_COMMENT },
#endif
{ MNTOPT_CONTEXT, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_FSCONTEXT, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_DEFCONTEXT, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_ROOTCONTEXT, MS_COMMENT, ZS_COMMENT },
#ifdef MS_I_VERSION
{ MNTOPT_IVERSION, MS_I_VERSION, ZS_COMMENT },
#endif
#ifdef MS_MANDLOCK
{ MNTOPT_NBMAND, MS_MANDLOCK, ZS_COMMENT },
{ MNTOPT_NONBMAND, MS_COMMENT, ZS_COMMENT },
#endif
/* Valid options not found in mount(8) */
{ MNTOPT_BIND, MS_BIND, ZS_COMMENT },
#ifdef MS_REC
{ MNTOPT_RBIND, MS_BIND|MS_REC, ZS_COMMENT },
#endif
{ MNTOPT_COMMENT, MS_COMMENT, ZS_COMMENT },
#ifdef MS_NOSUB
{ MNTOPT_NOSUB, MS_NOSUB, ZS_COMMENT },
#endif
#ifdef MS_SILENT
{ MNTOPT_QUIET, MS_SILENT, ZS_COMMENT },
#endif
/* Custom zfs options */
{ MNTOPT_XATTR, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NOXATTR, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_ZFSUTIL, MS_COMMENT, ZS_ZFSUTIL },
{ NULL, 0, 0 } };
/*
* Break the mount option into a name/value pair. The name is
* validated against the option map and the mount flags are set accordingly.
*/
static int
parse_option(char *mntopt, unsigned long *mntflags,
unsigned long *zfsflags, int sloppy)
{
const option_map_t *opt;
char *ptr, *name, *value = NULL;
int error = 0;
name = strdup(mntopt);
if (name == NULL)
return (ENOMEM);
for (ptr = name; ptr && *ptr; ptr++) {
if (*ptr == '=') {
*ptr = '\0';
value = ptr+1;
VERIFY3P(value, !=, NULL);
break;
}
}
for (opt = option_map; opt->name != NULL; opt++) {
if (strncmp(name, opt->name, strlen(name)) == 0) {
*mntflags |= opt->mntmask;
*zfsflags |= opt->zfsmask;
error = 0;
goto out;
}
}
if (!sloppy)
error = ENOENT;
out:
/* If required, further processing of the value may be done here */
free(name);
return (error);
}
/*
* Translate the mount option string into MS_* mount flags for the
* kernel vfs. When sloppy is non-zero, unknown options are ignored;
* otherwise they are considered fatal and are copied into badopt.
*/
int
zfs_parse_mount_options(char *mntopts, unsigned long *mntflags,
unsigned long *zfsflags, int sloppy, char *badopt, char *mtabopt)
{
int error = 0, quote = 0, flag = 0, count = 0;
char *ptr, *opt, *opts;
opts = strdup(mntopts);
if (opts == NULL)
return (ENOMEM);
*mntflags = 0;
opt = NULL;
/*
* Scan through all mount options which must be comma delimited.
* We must be careful to notice regions which are double quoted
* and skip commas in these regions. Each option is then checked
* to determine if it is a known option.
*/
for (ptr = opts; ptr && !flag; ptr++) {
if (opt == NULL)
opt = ptr;
if (*ptr == '"')
quote = !quote;
if (quote)
continue;
if (*ptr == '\0')
flag = 1;
if ((*ptr == ',') || (*ptr == '\0')) {
*ptr = '\0';
error = parse_option(opt, mntflags, zfsflags, sloppy);
if (error) {
strcpy(badopt, opt);
goto out;
}
if (!(*mntflags & MS_REMOUNT) &&
!(*zfsflags & ZS_ZFSUTIL) &&
mtabopt != NULL) {
if (count > 0)
strlcat(mtabopt, ",", MNT_LINE_MAX);
strlcat(mtabopt, opt, MNT_LINE_MAX);
count++;
}
opt = NULL;
}
}
out:
free(opts);
return (error);
}
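/*
 * Illustrative sketch, not part of the original change: translating a
 * comma-separated option string into MS_* and ZS_* flags.  With
 * sloppy == 0 an unknown option is fatal and its name is copied into
 * badopt; example_parse_opts() is a hypothetical helper.
 */
static int
example_parse_opts(void)
{
	unsigned long mntflags = 0, zfsflags = 0;
	char opts[] = "ro,noatime,zfsutil";
	char badopt[MNT_LINE_MAX] = {0};
	char mtabopt[MNT_LINE_MAX] = {0};
	int error;

	error = zfs_parse_mount_options(opts, &mntflags, &zfsflags, 0,
	    badopt, mtabopt);
	if (error != 0)
		return (error);	/* badopt names the unknown option */

	/*
	 * mntflags now carries MS_RDONLY and, where defined, MS_NOATIME;
	 * zfsflags carries ZS_ZFSUTIL.
	 */
	return (0);
}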
static void
append_mntopt(const char *name, const char *val, char *mntopts,
char *mtabopt, boolean_t quote)
{
char tmp[MNT_LINE_MAX];
snprintf(tmp, MNT_LINE_MAX, quote ? ",%s=\"%s\"" : ",%s=%s", name, val);
if (mntopts)
strlcat(mntopts, tmp, MNT_LINE_MAX);
if (mtabopt)
strlcat(mtabopt, tmp, MNT_LINE_MAX);
}
static void
zfs_selinux_setcontext(zfs_handle_t *zhp, zfs_prop_t zpt, const char *name,
char *mntopts, char *mtabopt)
{
char context[ZFS_MAXPROPLEN];
if (zfs_prop_get(zhp, zpt, context, sizeof (context),
NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(context, "none") != 0)
append_mntopt(name, context, mntopts, mtabopt, B_TRUE);
}
}
void
zfs_adjust_mount_options(zfs_handle_t *zhp, const char *mntpoint,
char *mntopts, char *mtabopt)
{
char prop[ZFS_MAXPROPLEN];
/*
* Check whether the ZFS_PROP_SELINUX_CONTEXT property exists and, if it
* does, whether the selinux context is set to the default ("none").
* If it is the default, allow the other context properties
* (fscontext, defcontext, rootcontext) to be set; this is needed
* because the 'context' property overrides the others.
* If it is not the default, set the 'context' mount option itself.
*/
if (zfs_prop_get(zhp, ZFS_PROP_SELINUX_CONTEXT, prop, sizeof (prop),
NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(prop, "none") == 0) {
zfs_selinux_setcontext(zhp, ZFS_PROP_SELINUX_FSCONTEXT,
MNTOPT_FSCONTEXT, mntopts, mtabopt);
zfs_selinux_setcontext(zhp, ZFS_PROP_SELINUX_DEFCONTEXT,
MNTOPT_DEFCONTEXT, mntopts, mtabopt);
zfs_selinux_setcontext(zhp,
ZFS_PROP_SELINUX_ROOTCONTEXT, MNTOPT_ROOTCONTEXT,
mntopts, mtabopt);
} else {
append_mntopt(MNTOPT_CONTEXT, prop,
mntopts, mtabopt, B_TRUE);
}
}
/* A hint used to determine an auto-mounted snapshot mount point */
append_mntopt(MNTOPT_MNTPOINT, mntpoint, mntopts, NULL, B_FALSE);
}
/*
* By default the filesystem is mounted by preparing the mount options
* (i.e. parsing some flags from the "opts" parameter into the "flags"
* parameter) and then directly calling the system call mount(2). We don't
* need the mount utility or to update /etc/mtab, because it is a symlink
* on all modern systems.
*
* If the environment variable ZFS_MOUNT_HELPER is set, we fall back to the
* previous behavior:
* The filesystem is mounted by invoking the system mount utility rather
* than by the system call mount(2). This ensures that the /etc/mtab
* file is correctly locked for the update. Performing our own locking
* and /etc/mtab update requires making an unsafe assumption about how
* the mount utility performs its locking. Unfortunately, this also means
* that in the case of a mount failure we do not have the exact errno. We
* must make do with the return value from the mount process.
*/
int
do_mount(zfs_handle_t *zhp, const char *mntpt, char *opts, int flags)
{
const char *src = zfs_get_name(zhp);
int error = 0;
if (!libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
char badopt[MNT_LINE_MAX] = {0};
- unsigned long mntflags = flags, zfsflags;
+ unsigned long mntflags = flags, zfsflags = 0;
char myopts[MNT_LINE_MAX] = {0};
if (zfs_parse_mount_options(opts, &mntflags,
&zfsflags, 0, badopt, NULL)) {
return (EINVAL);
}
strlcat(myopts, opts, MNT_LINE_MAX);
zfs_adjust_mount_options(zhp, mntpt, myopts, NULL);
if (mount(src, mntpt, MNTTYPE_ZFS, mntflags, myopts)) {
return (errno);
}
} else {
char *argv[9] = {
"/bin/mount",
"--no-canonicalize",
"-t", MNTTYPE_ZFS,
"-o", opts,
(char *)src,
(char *)mntpt,
(char *)NULL };
/* Return only the most critical mount error */
error = libzfs_run_process(argv[0], argv,
STDOUT_VERBOSE|STDERR_VERBOSE);
if (error) {
if (error & MOUNT_FILEIO) {
error = EIO;
} else if (error & MOUNT_USER) {
error = EINTR;
} else if (error & MOUNT_SOFTWARE) {
error = EPIPE;
} else if (error & MOUNT_BUSY) {
error = EBUSY;
} else if (error & MOUNT_SYSERR) {
error = EAGAIN;
} else if (error & MOUNT_USAGE) {
error = EINVAL;
} else
error = ENXIO; /* Generic error */
}
}
return (error);
}
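/*
 * Illustrative sketch, not part of the original change: mounting one
 * dataset through do_mount().  The dataset and mountpoint names are
 * hypothetical; a real caller already holds a zfs_handle_t from the
 * higher-level libzfs mount paths.
 */
static int
example_mount_one(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *zhp;
	char opts[] = MNTOPT_DEFAULTS;
	int err;

	if ((zhp = zfs_open(g_zfs, "tank/home", ZFS_TYPE_FILESYSTEM)) == NULL)
		return (ENOENT);

	/* Returns 0 or an errno-style value (e.g. EBUSY, EINVAL). */
	err = do_mount(zhp, "/mnt/home", opts, 0);
	zfs_close(zhp);
	return (err);
}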
int
do_unmount(const char *mntpt, int flags)
{
if (!libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
int rv = umount2(mntpt, flags);
return (rv < 0 ? errno : 0);
}
char force_opt[] = "-f";
char lazy_opt[] = "-l";
char *argv[7] = {
"/bin/umount",
"-t", MNTTYPE_ZFS,
NULL, NULL, NULL, NULL };
int rc, count = 3;
if (flags & MS_FORCE) {
argv[count] = force_opt;
count++;
}
if (flags & MS_DETACH) {
argv[count] = lazy_opt;
count++;
}
argv[count] = (char *)mntpt;
rc = libzfs_run_process(argv[0], argv, STDOUT_VERBOSE|STDERR_VERBOSE);
return (rc ? EINVAL : 0);
}
int
zfs_mount_delegation_check(void)
{
return ((geteuid() != 0) ? EACCES : 0);
}
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index 20b24d898d84..657afc6169b6 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -1,2382 +1,2389 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2019, 2021 by Delphix. All rights reserved.
.\" Copyright (c) 2019 Datto Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd June 1, 2021
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy ULONG_MAX Ns B Pq ulong
Maximum size in bytes of the dbuf cache.
The target size is the smaller of this value and
.No 1/2^ Ns Sy dbuf_cache_shift Pq 1/32nd
of the target ARC size.
The behavior of the dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_metadata_cache_max_bytes Ns = Ns Sy ULONG_MAX Ns B Pq ulong
Maximum size in bytes of the metadata dbuf cache.
The target size is the smaller of this value and
.No 1/2^ Ns Sy dbuf_metadata_cache_shift Pq 1/64th
of the target ARC size.
The behavior of the metadata dbuf cache and its associated settings
can be observed via the
.Pa /proc/spl/kstat/zfs/dbufstats
kstat.
.
.It Sy dbuf_cache_hiwater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage over
.Sy dbuf_cache_max_bytes
when dbufs must be evicted directly.
.
.It Sy dbuf_cache_lowater_pct Ns = Ns Sy 10 Ns % Pq uint
The percentage below
.Sy dbuf_cache_max_bytes
when the evict thread stops evicting dbufs.
.
.It Sy dbuf_cache_shift Ns = Ns Sy 5 Pq int
Set the size of the dbuf cache
.Pq Sy dbuf_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dbuf_metadata_cache_shift Ns = Ns Sy 6 Pq int
Set the size of the dbuf metadata cache
.Pq Sy dbuf_metadata_cache_max_bytes
to a log2 fraction of the target ARC size.
.
.It Sy dmu_object_alloc_chunk_shift Ns = Ns Sy 7 Po 128 Pc Pq int
dnode slots allocated in a single operation as a power of 2.
The default value minimizes lock contention for the bulk operation performed.
.
.It Sy dmu_prefetch_max Ns = Ns Sy 134217728 Ns B Po 128MB Pc Pq int
Limit the amount we can prefetch with one call to this amount in bytes.
This helps to limit the amount of memory that can be used by prefetching.
.
.It Sy ignore_hole_birth Pq int
Alias for
.Sy send_holes_without_birth_time .
.
.It Sy l2arc_feed_again Ns = Ns Sy 1 Ns | Ns 0 Pq int
Turbo L2ARC warm-up.
When the L2ARC is cold the fill interval will be set as fast as possible.
.
.It Sy l2arc_feed_min_ms Ns = Ns Sy 200 Pq ulong
Min feed interval in milliseconds.
Requires
.Sy l2arc_feed_again Ns = Ns Ar 1
and only applicable in related situations.
.
.It Sy l2arc_feed_secs Ns = Ns Sy 1 Pq ulong
Seconds between L2ARC writing.
.
.It Sy l2arc_headroom Ns = Ns Sy 2 Pq ulong
How far through the ARC lists to search for L2ARC cacheable content,
expressed as a multiplier of
.Sy l2arc_write_max .
ARC persistence across reboots can be achieved with persistent L2ARC
by setting this parameter to
.Sy 0 ,
allowing the full length of ARC lists to be searched for cacheable content.
.
.It Sy l2arc_headroom_boost Ns = Ns Sy 200 Ns % Pq ulong
Scales
.Sy l2arc_headroom
by this percentage when L2ARC contents are being successfully compressed
before writing.
A value of
.Sy 100
disables this feature.
.
.It Sy l2arc_mfuonly Ns = Ns Sy 0 Ns | Ns 1 Pq int
Controls whether only MFU metadata and data are cached from ARC into L2ARC.
This may be desired to avoid wasting space on L2ARC when reading/writing large
amounts of data that are not expected to be accessed more than once.
.Pp
The default is off,
meaning both MRU and MFU data and metadata are cached.
When turning off this feature, some MRU buffers will still be present
in ARC and eventually cached on L2ARC.
.No If Sy l2arc_noprefetch Ns = Ns Sy 0 ,
some prefetched buffers will be cached to L2ARC, and those might later
transition to MRU, in which case the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
Regardless of
.Sy l2arc_noprefetch ,
some MFU buffers might be evicted from ARC,
accessed later on as prefetches and transition to MRU as prefetches.
If accessed again they are counted as MRU and the
.Sy l2arc_mru_asize No arcstat will not be Sy 0 .
.Pp
The ARC status of L2ARC buffers when they were first cached in
L2ARC can be seen in the
.Sy l2arc_mru_asize , Sy l2arc_mfu_asize , No and Sy l2arc_prefetch_asize
arcstats when importing the pool or onlining a cache
device if persistent L2ARC is enabled.
.Pp
The
.Sy evict_l2_eligible_mru
arcstat does not take this option into account, so the information
provided by the
.Sy evict_l2_eligible_m[rf]u
arcstats can be used to decide whether toggling this option is appropriate
for the current workload.
.
.It Sy l2arc_meta_percent Ns = Ns Sy 33 Ns % Pq int
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure,
too many headers on a system with an irrationally large L2ARC
can render it slow or unusable.
This parameter limits L2ARC writes and rebuilds to achieve the target.
.
.It Sy l2arc_trim_ahead Ns = Ns Sy 0 Ns % Pq ulong
Trims ahead of the current write size
.Pq Sy l2arc_write_max
on L2ARC devices by this percentage of write size if we have filled the device.
If set to
.Sy 100
we TRIM twice the space required to accommodate upcoming writes.
A minimum of
.Sy 64MB
will be trimmed.
It also enables TRIM of the whole L2ARC device upon creation
or addition to an existing pool or if the header of the device is
invalid upon importing a pool or onlining a cache device.
A value of
.Sy 0
disables TRIM on L2ARC altogether and is the default as it can put significant
stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
.
.It Sy l2arc_noprefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
Do not write buffers to L2ARC if they were prefetched but not used by
applications.
In case there are prefetched buffers in L2ARC and this option
is later set, we do not read the prefetched buffers from L2ARC.
Unsetting this option is useful for caching sequential reads from the
disks to L2ARC and serving those reads from L2ARC later on.
This may be beneficial in case the L2ARC device is significantly faster
in sequential reads than the disks of the pool.
.Pp
Use
.Sy 1
to disable and
.Sy 0
to enable caching/reading prefetches to/from L2ARC.
.
.It Sy l2arc_norw Ns = Ns Sy 0 Ns | Ns 1 Pq int
No reads during writes.
.
.It Sy l2arc_write_boost Ns = Ns Sy 8388608 Ns B Po 8MB Pc Pq ulong
Cold L2ARC devices will have
.Sy l2arc_write_max
increased by this amount while they remain cold.
.
.It Sy l2arc_write_max Ns = Ns Sy 8388608 Ns B Po 8MB Pc Pq ulong
Max write bytes per interval.
.
.It Sy l2arc_rebuild_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Rebuild the L2ARC when importing a pool (persistent L2ARC).
This can be disabled if there are problems importing a pool
or attaching an L2ARC device (e.g. the L2ARC device is slow
in reading stored log metadata, or the metadata
has become somehow fragmented/unusable).
.
.It Sy l2arc_rebuild_blocks_min_l2size Ns = Ns Sy 1073741824 Ns B Po 1GB Pc Pq ulong
Minimum size of an L2ARC device required in order to write log blocks in it.
The log blocks are used upon importing the pool to rebuild the persistent L2ARC.
.Pp
For L2ARC devices less than 1GB, the amount of data
.Fn l2arc_evict
evicts is significant compared to the amount of restored L2ARC data.
In this case, do not write log blocks in L2ARC in order not to waste space.
.
.It Sy metaslab_aliquot Ns = Ns Sy 524288 Ns B Po 512kB Pc Pq ulong
Metaslab granularity, in bytes.
This is roughly similar to what would be referred to as the "stripe size"
in traditional RAID arrays.
In normal operation, ZFS will try to write this amount of data
to a top-level vdev before moving on to the next one.
.
.It Sy metaslab_bias_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group biasing based on their vdevs' over- or under-utilization
relative to the pool.
.
.It Sy metaslab_force_ganging Ns = Ns Sy 16777217 Ns B Po 16MB + 1B Pc Pq ulong
Make some blocks above a certain size be gang blocks.
This option is used by the test suite to facilitate testing.
.
.It Sy zfs_history_output_max Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq int
When attempting to log an output nvlist of an ioctl in the on-disk history,
the output will not be stored if it is larger than this size (in bytes).
This must be less than
.Sy DMU_MAX_ACCESS Pq 64MB .
This applies primarily to
.Fn zfs_ioc_channel_program Pq cf. Xr zfs-program 8 .
.
.It Sy zfs_keep_log_spacemaps_at_export Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent log spacemaps from being destroyed during pool exports and destroys.
.
.It Sy zfs_metaslab_segment_weight_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable segment-based metaslab selection.
.
.It Sy zfs_metaslab_switch_threshold Ns = Ns Sy 2 Pq int
When using segment-based metaslab selection, continue allocating
from the active metaslab until this option's
worth of buckets have been exhausted.
.
.It Sy metaslab_debug_load Ns = Ns Sy 0 Ns | Ns 1 Pq int
Load all metaslabs during pool import.
.
.It Sy metaslab_debug_unload Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prevent metaslabs from being unloaded.
.
.It Sy metaslab_fragmentation_factor_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable use of the fragmentation metric in computing metaslab weights.
.
.It Sy metaslab_df_max_search Ns = Ns Sy 16777216 Ns B Po 16MB Pc Pq int
Maximum distance to search forward from the last offset.
Without this limit, fragmented pools can see
.Em >100`000
iterations and
.Fn metaslab_block_picker
becomes the performance limiting factor on high-performance storage.
.Pp
With the default setting of
.Sy 16MB ,
we typically see less than
.Em 500
iterations, even with very fragmented
.Sy ashift Ns = Ns Sy 9
pools.
The maximum number of iterations possible is
.Sy metaslab_df_max_search / 2^(ashift+1) .
With the default setting of
.Sy 16MB
this is
.Em 16*1024 Pq with Sy ashift Ns = Ns Sy 9
or
.Em 2*1024 Pq with Sy ashift Ns = Ns Sy 12 .
.
.It Sy metaslab_df_use_largest_segment Ns = Ns Sy 0 Ns | Ns 1 Pq int
If not searching forward (due to
.Sy metaslab_df_max_search , metaslab_df_free_pct ,
.No or Sy metaslab_df_alloc_threshold ) ,
this tunable controls which segment is used.
If set, we will use the largest free segment.
If unset, we will use a segment of at least the requested size.
.
.It Sy zfs_metaslab_max_size_cache_sec Ns = Ns Sy 3600 Ns s Po 1h Pc Pq ulong
When we unload a metaslab, we cache the size of the largest free chunk.
We use that cached size to determine whether or not to load a metaslab
for a given allocation.
As more frees accumulate in that metaslab while it's unloaded,
the cached max size becomes less and less accurate.
After a number of seconds controlled by this tunable,
we stop considering the cached max size and start
considering only the histogram instead.
.
.It Sy zfs_metaslab_mem_limit Ns = Ns Sy 25 Ns % Pq int
When we are loading a new metaslab, we check the amount of memory being used
to store metaslab range trees.
If it is over a threshold, we attempt to unload the least recently used metaslab
to prevent the system from clogging all of its memory with range trees.
This tunable sets the percentage of total system memory that is the threshold.
.
.It Sy zfs_metaslab_try_hard_before_gang Ns = Ns Sy 0 Ns | Ns 1 Pq int
.Bl -item -compact
.It
If unset, we will first try normal allocation.
.It
If that fails then we will do a gang allocation.
.It
If that fails then we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.Pp
.Bl -item -compact
.It
If set, we will first try normal allocation.
.It
If that fails then we will do a "try hard" allocation.
.It
If that fails we will do a gang allocation.
.It
If that fails we will do a "try hard" gang allocation.
.It
If that fails then we will have a multi-layer gang block.
.El
.
.It Sy zfs_metaslab_find_max_tries Ns = Ns Sy 100 Pq int
When not trying hard, we only consider this number of the best metaslabs.
This improves performance, especially when there are many metaslabs per vdev
and the allocation can't actually be satisfied
(so we would otherwise iterate all metaslabs).
.
.It Sy zfs_vdev_default_ms_count Ns = Ns Sy 200 Pq int
When a vdev is added, target this number of metaslabs per top-level vdev.
.
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512MB Pc Pq int
Default limit for metaslab size.
.
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy ASHIFT_MAX Po 16 Pc Pq ulong
Maximum ashift used when optimizing for logical -> physical sector size on new
top-level vdevs.
.
.It Sy zfs_vdev_min_auto_ashift Ns = Ns Sy ASHIFT_MIN Po 9 Pc Pq ulong
Minimum ashift used when creating new top-level vdevs.
.
.It Sy zfs_vdev_min_ms_count Ns = Ns Sy 16 Pq int
Minimum number of metaslabs to create in a top-level vdev.
.
.It Sy vdev_validate_skip Ns = Ns Sy 0 Ns | Ns 1 Pq int
Skip label validation steps during pool import.
Changing is not recommended unless you know what you're doing
and are recovering a damaged label.
.
.It Sy zfs_vdev_ms_count_limit Ns = Ns Sy 131072 Po 128k Pc Pq int
Practical upper limit of total metaslabs per top-level vdev.
.
.It Sy metaslab_preload_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable metaslab group preloading.
.
.It Sy metaslab_lba_weighting_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Give more weight to metaslabs with lower LBAs,
assuming they have greater bandwidth,
as is typically the case on a modern constant angular velocity disk drive.
.
.It Sy metaslab_unload_delay Ns = Ns Sy 32 Pq int
After a metaslab is used, we keep it loaded for this many TXGs, to attempt to
reduce unnecessary reloading.
Note that both this many TXGs and
.Sy metaslab_unload_delay_ms
milliseconds must pass before unloading will occur.
.
.It Sy metaslab_unload_delay_ms Ns = Ns Sy 600000 Ns ms Po 10min Pc Pq int
After a metaslab is used, we keep it loaded for this many milliseconds,
to attempt to reduce unnecessary reloading.
Note that both this many milliseconds and
.Sy metaslab_unload_delay
TXGs must pass before unloading will occur.
.
.It Sy reference_history Ns = Ns Sy 3 Pq int
Maximum number of reference holders being tracked when reference_tracking_enable is active.
.
.It Sy reference_tracking_enable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Track reference holders to
.Sy refcount_t
objects (debug builds only).
.
.It Sy send_holes_without_birth_time Ns = Ns Sy 1 Ns | Ns 0 Pq int
When set, the
.Sy hole_birth
optimization will not be used, and all holes will always be sent during a
.Nm zfs Cm send .
This is useful if you suspect your datasets are affected by a bug in
.Sy hole_birth .
.
.It Sy spa_config_path Ns = Ns Pa /etc/zfs/zpool.cache Pq charp
SPA config file.
.
.It Sy spa_asize_inflation Ns = Ns Sy 24 Pq int
Multiplication factor used to estimate actual disk consumption from the
size of data being written.
The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its configuration.
Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor,
particularly if they operate close to quota or capacity limits.
.
.It Sy spa_load_print_vdev_tree Ns = Ns Sy 0 Ns | Ns 1 Pq int
Whether to print the vdev tree in the debugging message buffer during pool import.
.
.It Sy spa_load_verify_data Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse data blocks during an "extreme rewind"
.Pq Fl X
import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal skips non-metadata blocks.
It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.
.It Sy spa_load_verify_metadata Ns = Ns Sy 1 Ns | Ns 0 Pq int
Whether to traverse blocks during an "extreme rewind"
.Pq Fl X
pool import.
.Pp
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification.
If this parameter is unset, the traversal is not performed.
It can be toggled once the import has started to stop or start the traversal.
.
.It Sy spa_load_verify_shift Ns = Ns Sy 4 Po 1/16th Pc Pq int
Sets the maximum number of bytes to consume during pool import to the log2
fraction of the target ARC size.
.
.It Sy spa_slop_shift Ns = Ns Sy 5 Po 1/32nd Pc Pq int
Normally, we don't allow the last
.Sy 3.2% Pq Sy 1/2^spa_slop_shift
of space in the pool to be consumed.
This ensures that we don't run the pool completely out of space,
due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space.
If we have less than this amount of free space,
most ZPL operations (e.g. write, create) will return
.Sy ENOSPC .
.
.It Sy vdev_removal_max_span Ns = Ns Sy 32768 Ns B Po 32kB Pc Pq int
During top-level vdev removal, chunks of data are copied from the vdev
which may include free space in order to trade bandwidth for IOPS.
This parameter determines the maximum span of free space, in bytes,
which will be included as "unnecessary" data in a chunk of copied data.
.Pp
The default value here was chosen to align with
.Sy zfs_vdev_read_gap_limit ,
which is a similar concept when doing
regular reads (but there's no reason it has to be the same).
.
.It Sy vdev_file_logical_ashift Ns = Ns Sy 9 Po 512B Pc Pq ulong
Logical ashift for file-based devices.
.
.It Sy vdev_file_physical_ashift Ns = Ns Sy 9 Po 512B Pc Pq ulong
Physical ashift for file-based devices.
.
.It Sy zap_iterate_prefetch Ns = Ns Sy 1 Ns | Ns 0 Pq int
If set, when we start iterating over a ZAP object,
prefetch the entire object (all leaf blocks).
However, this is limited by
.Sy dmu_prefetch_max .
.
.It Sy zfetch_array_rd_sz Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq ulong
If prefetching is enabled, disable prefetching for reads larger than this size.
.
.It Sy zfetch_max_distance Ns = Ns Sy 8388608 Ns B Po 8MB Pc Pq uint
Max bytes to prefetch per stream.
.
.It Sy zfetch_max_idistance Ns = Ns Sy 67108864 Ns B Po 64MB Pc Pq uint
Max bytes to prefetch indirects for per stream.
.
.It Sy zfetch_max_streams Ns = Ns Sy 8 Pq uint
Max number of streams per zfetch (prefetch streams per file).
.
.It Sy zfetch_min_sec_reap Ns = Ns Sy 2 Pq uint
Min time before an active prefetch stream can be reclaimed.
.
.It Sy zfs_abd_scatter_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enables the ARC to use scatter/gather lists; when disabled, all allocations
are forced to be linear in kernel memory.
Disabling can improve performance in some code paths
at the expense of fragmented kernel memory.
.
.It Sy zfs_abd_scatter_max_order Ns = Ns Sy MAX_ORDER-1 Pq uint
Maximum number of consecutive memory pages allocated in a single block for
scatter/gather lists.
.Pp
The value of
.Sy MAX_ORDER
depends on kernel configuration.
.
.It Sy zfs_abd_scatter_min_size Ns = Ns Sy 1536 Ns B Po 1.5kB Pc Pq uint
This is the minimum allocation size that will use scatter (page-based) ABDs.
Smaller allocations will use linear ABDs.
.
.It Sy zfs_arc_dnode_limit Ns = Ns Sy 0 Ns B Pq ulong
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata.
This value acts as a ceiling to the amount of dnode metadata, and defaults to
.Sy 0 ,
which indicates that a percentage based on
.Sy zfs_arc_dnode_limit_percent
of the ARC meta buffers may be used for dnodes.
.Pp
Also see
.Sy zfs_arc_meta_prune
which serves a similar purpose but is used
when the amount of metadata in the ARC exceeds
.Sy zfs_arc_meta_limit
rather than in response to overall demand for non-metadata.
.
.It Sy zfs_arc_dnode_limit_percent Ns = Ns Sy 10 Ns % Pq ulong
Percentage that can be consumed by dnodes of ARC meta buffers.
.Pp
See also
.Sy zfs_arc_dnode_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_dnode_reduce_percent Ns = Ns Sy 10 Ns % Pq ulong
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds
.Sy zfs_arc_dnode_limit .
.
.It Sy zfs_arc_average_blocksize Ns = Ns Sy 8192 Ns B Po 8kB Pc Pq int
The ARC's buffer hash table is sized based on the assumption of an average
block size of this value.
This works out to roughly 1MB of hash table per 1GB of physical memory
with 8-byte pointers.
For configurations with a known larger average block size,
this value can be increased to reduce the memory footprint.
.
.It Sy zfs_arc_eviction_pct Ns = Ns Sy 200 Ns % Pq int
When
.Fn arc_is_overflowing ,
.Fn arc_get_data_impl
waits for this percent of the requested amount of data to be evicted.
For example, by default, for every
.Em 2kB
that's evicted,
.Em 1kB
of it may be "reused" by a new allocation.
Since this is above
.Sy 100 Ns % ,
it ensures that progress is made towards getting
.Sy arc_size No under Sy arc_c .
Since this is finite, it ensures that allocations can still happen,
even during the potentially long time that
.Sy arc_size No is more than Sy arc_c .
.
.It Sy zfs_arc_evict_batch_limit Ns = Ns Sy 10 Pq int
Number of ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.
.It Sy zfs_arc_grow_retry Ns = Ns Sy 0 Ns s Pq int
If set to a nonzero value, it will replace the
.Sy arc_grow_retry
value with this value.
The
.Sy arc_grow_retry
.No value Pq default Sy 5 Ns s
is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.
.It Sy zfs_arc_lotsfree_percent Ns = Ns Sy 10 Ns % Pq int
Throttle I/O when free system memory drops below this percentage of total
system memory.
Setting this value to
.Sy 0
will disable the throttle.
.
.It Sy zfs_arc_max Ns = Ns Sy 0 Ns B Pq ulong
Max size of ARC in bytes.
If
.Sy 0 ,
then the max size of ARC is determined by the amount of system memory installed.
Under Linux, half of system memory will be used as the limit.
Under
.Fx ,
the larger of
.Sy all_system_memory - 1GB No and Sy 5/8 * all_system_memory
will be used as the limit.
This value must be at least
.Sy 67108864 Ns B Pq 64MB .
.Pp
This value can be changed dynamically, with some caveats.
It cannot be set back to
.Sy 0
while running, and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.
.It Sy zfs_arc_meta_adjust_restarts Ns = Ns Sy 4096 Pq ulong
The number of restart passes to make while scanning the ARC, attempting
to free buffers in order to stay below the
.Sy zfs_arc_meta_limit .
This value should not need to be tuned but is available to facilitate
performance analysis.
.
.It Sy zfs_arc_meta_limit Ns = Ns Sy 0 Ns B Pq ulong
The maximum allowed size in bytes that metadata buffers are allowed to
consume in the ARC.
When this limit is reached, metadata buffers will be reclaimed,
even if the overall
.Sy arc_c_max
has not been reached.
It defaults to
.Sy 0 ,
which indicates that a percentage based on
.Sy zfs_arc_meta_limit_percent
of the ARC may be used for metadata.
.Pp
This value may be changed dynamically, except that it must be set to an explicit value
.Pq cannot be set back to Sy 0 .
.
.It Sy zfs_arc_meta_limit_percent Ns = Ns Sy 75 Ns % Pq ulong
Percentage of ARC buffers that can be used for metadata.
.Pp
See also
.Sy zfs_arc_meta_limit ,
which serves a similar purpose but has a higher priority if nonzero.
.
.It Sy zfs_arc_meta_min Ns = Ns Sy 0 Ns B Pq ulong
The minimum allowed size in bytes that metadata buffers may consume in
the ARC.
.
.It Sy zfs_arc_meta_prune Ns = Ns Sy 10000 Pq int
The number of dentries and inodes to be scanned looking for entries
which can be dropped.
This may be required when the ARC reaches the
.Sy zfs_arc_meta_limit
because dentries and inodes can pin buffers in the ARC.
Increasing this value will cause the dentry and inode caches
to be pruned more aggressively.
Setting this value to
.Sy 0
will disable pruning the inode and dentry caches.
.
.It Sy zfs_arc_meta_strategy Ns = Ns Sy 1 Ns | Ns 0 Pq int
Define the strategy for ARC metadata buffer eviction (meta reclaim strategy):
.Bl -tag -compact -offset 4n -width "0 (META_ONLY)"
.It Sy 0 Pq META_ONLY
evict only the ARC metadata buffers
.It Sy 1 Pq BALANCED
additional data buffers may be evicted if required
to evict the required number of metadata buffers.
.El
.
.It Sy zfs_arc_min Ns = Ns Sy 0 Ns B Pq ulong
Min size of ARC in bytes.
.No If set to Sy 0 , arc_c_min
will default to consuming the larger of
.Sy 32MB No or Sy all_system_memory/32 .
.
.It Sy zfs_arc_min_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 1s Pc Pq int
Minimum time prefetched blocks are locked in the ARC.
.
.It Sy zfs_arc_min_prescient_prefetch_ms Ns = Ns Sy 0 Ns ms Ns Po Ns ≡ Ns 6s Pc Pq int
Minimum time "prescient prefetched" blocks are locked in the ARC.
These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them.
.
+.It Sy zfs_arc_prune_task_threads Ns = Ns Sy 1 Pq int
+Number of arc_prune threads.
+.Fx
+does not need more than one.
+Linux may theoretically use one per mount point, up to the number of CPUs,
+but that was not proven to be useful.
+.
.It Sy zfs_max_missing_tvds Ns = Ns Sy 0 Pq int
Number of missing top-level vdevs which will be allowed during
pool import (only in read-only mode).
.
.It Sy zfs_max_nvlist_src_size Ns = Ns Sy 0 Pq ulong
Maximum size in bytes allowed to be passed as
.Sy zc_nvlist_src_size
for ioctls on
.Pa /dev/zfs .
This prevents a user from causing the kernel to allocate
an excessive amount of memory.
When the limit is exceeded, the ioctl fails with
.Sy EINVAL
and a description of the error is sent to the
.Pa zfs-dbgmsg
log.
This parameter should not need to be touched under normal circumstances.
If
.Sy 0 ,
equivalent to a quarter of the user-wired memory limit under
.Fx
and to
.Sy 134217728 Ns B Pq 128MB
under Linux.
.
.It Sy zfs_multilist_num_sublists Ns = Ns Sy 0 Pq int
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and metadata objects.
Locking is performed at the level of these "sub-lists".
This parameter controls the number of sub-lists per ARC state,
and also applies to other uses of the multilist data structure.
.Pp
If
.Sy 0 ,
equivalent to the greater of the number of online CPUs and
.Sy 4 .
.
.It Sy zfs_arc_overflow_shift Ns = Ns Sy 8 Pq int
The ARC size is considered to be overflowing if it exceeds the current
ARC target size
.Pq Sy arc_c
by thresholds determined by this parameter.
Exceeding by
.Sy ( arc_c >> zfs_arc_overflow_shift ) * 0.5
starts ARC reclamation process.
If that appears insufficient, exceeding by
.Sy ( arc_c >> zfs_arc_overflow_shift ) * 1.5
blocks new buffer allocation until the reclaim thread catches up.
The started reclamation process continues until the ARC size returns below
the target size.
.Pp
The default value of
.Sy 8
causes the ARC to start reclamation if it exceeds the target size by
.Em 0.2%
of the target size, and block allocations by
.Em 0.6% .
.
.It Sy zfs_arc_p_min_shift Ns = Ns Sy 0 Pq int
If nonzero, this will update
.Sy arc_p_min_shift Pq default Sy 4
with the new value.
.Sy arc_p_min_shift No is used as a shift of Sy arc_c
when calculating the minimum
.Sy arc_p No size.
.
.It Sy zfs_arc_p_dampener_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable
.Sy arc_p
adapt dampener, which reduces the maximum single adjustment to
.Sy arc_p .
.
.It Sy zfs_arc_shrink_shift Ns = Ns Sy 0 Pq int
If nonzero, this will update
.Sy arc_shrink_shift Pq default Sy 7
with the new value.
.
.It Sy zfs_arc_pc_percent Ns = Ns Sy 0 Ns % Po off Pc Pq uint
Percent of pagecache to reclaim ARC to.
.Pp
This tunable allows the ZFS ARC to play more nicely
with the kernel's LRU pagecache.
It can guarantee that the ARC size won't collapse under scanning
pressure on the pagecache, yet still allows the ARC to be reclaimed down to
.Sy zfs_arc_min
if necessary.
This value is specified as percent of pagecache size (as measured by
.Sy NR_FILE_PAGES ) ,
where that percent may exceed
.Sy 100 .
This
only operates during memory pressure/reclaim.
.
.It Sy zfs_arc_shrinker_limit Ns = Ns Sy 10000 Pq int
This is a limit on how many pages the ARC shrinker makes available for
eviction in response to one page allocation attempt.
Note that in practice, the kernel's shrinker can ask us to evict
up to about four times this for one allocation attempt.
.Pp
The default limit of
.Sy 10000 Pq in practice, Em 160MB No per allocation attempt with 4kB pages
limits the amount of time spent attempting to reclaim ARC memory to
less than 100ms per allocation attempt,
even with a small average compressed block size of ~8kB.
.Pp
The parameter can be set to 0 (zero) to disable the limit,
and only applies on Linux.
.
.It Sy zfs_arc_sys_free Ns = Ns Sy 0 Ns B Pq ulong
The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512kB No and Sy all_system_memory/64 .
.
.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Disable pool import at module load by ignoring the cache file
.Pq Sy spa_config_path .
.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
(currently 10 checksums over 10 seconds)
or else the daemon may not trigger any action.
.
.It Sy zfs_commit_timeout_pct Ns = Ns Sy 5 Ns % Pq int
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage.
The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.
.It Sy zfs_condense_indirect_commit_entry_delay_ms Ns = Ns Sy 0 Ns ms Pq int
Vdev indirection layer (used for device removal) sleeps for this many
milliseconds during mapping generation.
Intended for use with the test suite to throttle vdev removal speed.
.
.It Sy zfs_condense_indirect_obsolete_pct Ns = Ns Sy 25 Ns % Pq int
Minimum percent of obsolete bytes in vdev mapping required to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
Intended for use with the test suite
to facilitate triggering condensing as needed.
.
.It Sy zfs_condense_indirect_vdevs_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable condensing indirect vdev mappings.
When set, attempt to condense indirect vdev mappings
if the mapping uses more than
.Sy zfs_condense_min_mapping_bytes
bytes of memory and if the obsolete space map object uses more than
.Sy zfs_condense_max_obsolete_bytes
bytes on-disk.
The condensing process is an attempt to save memory by removing obsolete mappings.
.
.It Sy zfs_condense_max_obsolete_bytes Ns = Ns Sy 1073741824 Ns B Po 1GB Pc Pq ulong
Only attempt to condense indirect vdev mappings if the on-disk size
of the obsolete space map object is greater than this number of bytes
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_condense_min_mapping_bytes Ns = Ns Sy 131072 Ns B Po 128kB Pc Pq ulong
Minimum size vdev mapping to attempt to condense
.Pq see Sy zfs_condense_indirect_vdevs_enable .
.
.It Sy zfs_dbgmsg_enable Ns = Ns Sy 1 Ns | Ns 0 Pq int
Internally ZFS keeps a small log to facilitate debugging.
The log is enabled by default, and can be disabled by unsetting this option.
The contents of the log can be accessed by reading
.Pa /proc/spl/kstat/zfs/dbgmsg .
Writing
.Sy 0
to the file clears the log.
.Pp
This setting does not influence debug prints due to
.Sy zfs_flags .
.
.It Sy zfs_dbgmsg_maxsize Ns = Ns Sy 4194304 Ns B Po 4MB Pc Pq int
Maximum size of the internal ZFS debug log.
.
.It Sy zfs_dbuf_state_index Ns = Ns Sy 0 Pq int
Historically used for controlling what reporting was available under
.Pa /proc/spl/kstat/zfs .
No effect.
.
.It Sy zfs_deadman_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
When a pool sync operation takes longer than
.Sy zfs_deadman_synctime_ms ,
or when an individual I/O operation takes longer than
.Sy zfs_deadman_ziotime_ms ,
then the operation is considered to be "hung".
If
.Sy zfs_deadman_enabled
is set, then the deadman behavior is invoked as described by
.Sy zfs_deadman_failmode .
By default, the deadman is enabled and set to
.Sy wait
which results in "hung" I/Os only being logged.
The deadman is automatically disabled when a pool gets suspended.
.
.It Sy zfs_deadman_failmode Ns = Ns Sy wait Pq charp
Controls the failure behavior when the deadman detects a "hung" I/O operation.
Valid values are:
.Bl -tag -compact -offset 4n -width "continue"
.It Sy wait
Wait for a "hung" operation to complete.
For each "hung" operation a "deadman" event will be posted
describing that operation.
.It Sy continue
Attempt to recover from a "hung" operation by re-dispatching it
to the I/O pipeline if possible.
.It Sy panic
Panic the system.
This can be used to facilitate automatic fail-over
to a properly configured fail-over partner.
.El
.
.It Sy zfs_deadman_checktime_ms Ns = Ns Sy 60000 Ns ms Po 1min Pc Pq int
Check time in milliseconds.
This defines the frequency at which we check for hung I/O requests
and potentially invoke the
.Sy zfs_deadman_failmode
behavior.
.
.It Sy zfs_deadman_synctime_ms Ns = Ns Sy 600000 Ns ms Po 10min Pc Pq ulong
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the pool sync completes.
.
.It Sy zfs_deadman_ziotime_ms Ns = Ns Sy 300000 Ns ms Po 5min Pc Pq ulong
Interval in milliseconds after which the deadman is triggered and an
individual I/O operation is considered to be "hung".
As long as the operation remains "hung",
the deadman will be invoked every
.Sy zfs_deadman_checktime_ms
milliseconds until the operation completes.
.
.It Sy zfs_dedup_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable prefetching dedup-ed blocks which are going to be freed.
.
.It Sy zfs_delay_min_dirty_percent Ns = Ns Sy 60 Ns % Pq int
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of
.Sy zfs_dirty_data_max .
This value should be at least
.Sy zfs_vdev_async_write_active_max_dirty_percent .
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_delay_scale Ns = Ns Sy 500000 Pq int
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.Pp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second.
This will smoothly handle workloads between a tenth and ten times this number
of operations per second.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
.Sy zfs_delay_scale * zfs_dirty_data_max Em must be smaller than Sy 2^64 .
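.Pp
For example, for a backend capable of a maximum of roughly
.Em 2000
operations per second, the guideline above suggests the default value:
.Dl 10^9 / 2000 = 500000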
.
.It Sy zfs_disable_ivset_guid_check Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disables requirement for IVset GUIDs to be present and match when doing a raw
receive of encrypted datasets.
Intended for users whose pools were created with
OpenZFS pre-release versions and now have compatibility issues.
.
.It Sy zfs_key_max_salt_uses Ns = Ns Sy 400000000 Po 4*10^8 Pc Pq ulong
Maximum number of uses of a single salt value before generating a new one for
encrypted datasets.
The default value is also the maximum.
.
.It Sy zfs_object_mutex_size Ns = Ns Sy 64 Pq uint
Size of the znode hashtable used for holds.
.Pp
Due to the need to hold locks on objects that may not exist yet, kernel mutexes
are not created per-object and instead a hashtable is used where collisions
will result in objects waiting when there is not actually contention on the
same object.
.
.It Sy zfs_slow_io_events_per_second Ns = Ns Sy 20 Ns /s Pq int
Rate limit delay and deadman zevents (which report slow I/Os) to this many per
second.
.
.It Sy zfs_unflushed_max_mem_amt Ns = Ns Sy 1073741824 Ns B Po 1GB Pc Pq ulong
Upper-bound limit for unflushed metadata changes to be held by the
log spacemap in memory, in bytes.
.
.It Sy zfs_unflushed_max_mem_ppm Ns = Ns Sy 1000 Ns ppm Po 0.1% Pc Pq ulong
Part of overall system memory that ZFS allows to be used
for unflushed metadata changes by the log spacemap, in millionths.
.
.It Sy zfs_unflushed_log_block_max Ns = Ns Sy 262144 Po 256k Pc Pq ulong
Describes the maximum number of log spacemap blocks allowed for each pool.
The default value means that the space in all the log spacemaps
can add up to no more than
.Sy 262144
blocks (which means
.Em 32GB
of logical space before compression and ditto blocks,
assuming that blocksize is
.Em 128kB ) .
.Pp
This tunable is important because it involves a trade-off between import
time after an unclean export and the frequency of flushing metaslabs.
The higher this number is, the more log blocks we allow when the pool is
active which means that we flush metaslabs less often and thus decrease
the number of I/Os for spacemap updates per TXG.
At the same time though, that means that in the event of an unclean export,
there will be more log spacemap blocks for us to read, inducing overhead
in the import time of the pool.
The lower the number, the more often metaslabs are flushed,
destroying log blocks sooner as they become obsolete,
which leaves fewer blocks to be read during import after a crash.
.Pp
Each log spacemap block existing during pool import leads to approximately
one extra logical I/O issued.
This is the reason why this tunable is exposed in terms of blocks rather
than space used.
.
.It Sy zfs_unflushed_log_block_min Ns = Ns Sy 1000 Pq ulong
If the number of metaslabs is small and our incoming rate is high,
we could get into a situation that we are flushing all our metaslabs every TXG.
Thus we always allow at least this many log blocks.
.
.It Sy zfs_unflushed_log_block_pct Ns = Ns Sy 400 Ns % Pq ulong
Tunable used to determine the number of blocks that can be used for
the spacemap log, expressed as a percentage of the total number of
metaslabs in the pool.
.
.It Sy zfs_unlink_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When enabled, files will not be asynchronously removed from the list of pending
unlinks and the space they consume will be leaked.
Once this option has been disabled and the dataset is remounted,
the pending unlinks will be processed and the freed space returned to the pool.
This option is used by the test suite.
.
.It Sy zfs_delete_blocks Ns = Ns Sy 20480 Pq ulong
This is used to define a large file for the purposes of deletion.
Files containing more than
.Sy zfs_delete_blocks
blocks will be deleted asynchronously, while smaller files are deleted synchronously.
Decreasing this value will reduce the time spent in an
.Xr unlink 2
system call, at the expense of a longer delay before the freed space is available.
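.Pp
For example, assuming a
.Em 128kB
recordsize, the default of
.Sy 20480
blocks corresponds to files larger than roughly
.Em 2.5GB .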
.
.It Sy zfs_dirty_data_max Ns = Pq int
Determines the dirty space limit in bytes.
Once this limit is exceeded, new writes are halted until space frees up.
This parameter takes precedence over
.Sy zfs_dirty_data_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/10 ,
capped at
.Sy zfs_dirty_data_max_max .
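.Pp
For example, on a system with
.Em 64GB
of physical RAM, the default works out to
.Dl 64GB / 10 = 6.4GB
which is well below the default
.Sy zfs_dirty_data_max_max
of
.Sy physical_ram/4
.Pq Em 16GB No on the same system .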
.
.It Sy zfs_dirty_data_max_max Ns = Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
This parameter takes precedence over
.Sy zfs_dirty_data_max_max_percent .
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Defaults to
.Sy physical_ram/4 .
.
.It Sy zfs_dirty_data_max_max_percent Ns = Ns Sy 25 Ns % Pq int
Maximum allowable value of
.Sy zfs_dirty_data_max ,
expressed as a percentage of physical RAM.
This limit is only enforced at module load time, and will be ignored if
.Sy zfs_dirty_data_max
is later changed.
The parameter
.Sy zfs_dirty_data_max_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.
.It Sy zfs_dirty_data_max_percent Ns = Ns Sy 10 Ns % Pq int
Determines the dirty space limit, expressed as a percentage of all memory.
Once this limit is exceeded, new writes are halted until space frees up.
The parameter
.Sy zfs_dirty_data_max
takes precedence over this one.
.No See Sx ZFS TRANSACTION DELAY .
.Pp
Subject to
.Sy zfs_dirty_data_max_max .
.
.It Sy zfs_dirty_data_sync_percent Ns = Ns Sy 20 Ns % Pq int
Start syncing out a transaction group if there's at least this much dirty data
.Pq as a percentage of Sy zfs_dirty_data_max .
This should be less than
.Sy zfs_vdev_async_write_active_min_dirty_percent .
.
.It Sy zfs_fallocate_reserve_percent Ns = Ns Sy 110 Ns % Pq uint
Since ZFS is a copy-on-write filesystem with snapshots, blocks cannot be
preallocated for a file in order to guarantee that later writes will not
run out of space.
Instead,
.Xr fallocate 2
space preallocation only checks that sufficient space is currently available
in the pool or the user's project quota allocation,
and then creates a sparse file of the requested size.
The requested space is multiplied by
.Sy zfs_fallocate_reserve_percent
to allow additional space for indirect blocks and other internal metadata.
Setting this to
.Sy 0
disables support for
.Xr fallocate 2
and causes it to return
.Sy EOPNOTSUPP .
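.Pp
For example, with the default of
.Sy 110% ,
an
.Xr fallocate 2
request for
.Em 10GB
requires roughly
.Em 11GB
of free space in the pool or project quota before the sparse file is created.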
.
.It Sy zfs_fletcher_4_impl Ns = Ns Sy fastest Pq string
Select a fletcher 4 implementation.
.Pp
Supported selectors are:
.Sy fastest , scalar , sse2 , ssse3 , avx2 , avx512f , avx512bw ,
.No and Sy aarch64_neon .
All except
.Sy fastest No and Sy scalar
require instruction set extensions to be available,
and will only appear if ZFS detects that they are present at runtime.
If multiple implementations of fletcher 4 are available, the
.Sy fastest
will be chosen using a micro benchmark.
Selecting
.Sy scalar
results in the original CPU-based calculation being used.
Selecting any option other than
.Sy fastest No or Sy scalar
results in vector instructions
from the respective CPU instruction set being used.
.
.It Sy zfs_free_bpobj_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Enable/disable the processing of the free_bpobj object.
.
.It Sy zfs_async_block_max_blocks Ns = Ns Sy ULONG_MAX Po unlimited Pc Pq ulong
Maximum number of blocks freed in a single TXG.
.
.It Sy zfs_max_async_dedup_frees Ns = Ns Sy 100000 Po 10^5 Pc Pq ulong
Maximum number of dedup blocks freed in a single TXG.
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Pq ulong
If nonzero, override record size calculation for
.Nm zfs Cm send
estimates.
.
.It Sy zfs_vdev_async_read_max_active Ns = Ns Sy 3 Pq int
Maximum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_read_min_active Ns = Ns Sy 1 Pq int
Minimum asynchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns Sy 60 Ns % Pq int
When the pool has more than this much dirty data, use
.Sy zfs_vdev_async_write_max_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns Sy 30 Ns % Pq int
When the pool has less than this much dirty data, use
.Sy zfs_vdev_async_write_min_active
to limit active async writes.
If the dirty data is between the minimum and maximum,
the active I/O limit is linearly
interpolated.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_max_active Ns = Ns Sy 30 Pq int
Maximum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_async_write_min_active Ns = Ns Sy 2 Pq int
Minimum asynchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.Pp
Lower values are associated with better latency on rotational media but poorer
resilver performance.
The default value of
.Sy 2
was chosen as a compromise.
A value of
.Sy 3
has been shown to improve resilver performance further at a cost of
further increasing latency.
.
.It Sy zfs_vdev_initializing_max_active Ns = Ns Sy 1 Pq int
Maximum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_initializing_min_active Ns = Ns Sy 1 Pq int
Minimum initializing I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_max_active Ns = Ns Sy 1000 Pq int
The maximum number of I/O operations active to each device.
Ideally, this will be at least the sum of each queue's
.Sy max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_max_active Ns = Ns Sy 3 Pq int
Maximum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_rebuild_min_active Ns = Ns Sy 1 Pq int
Minimum sequential resilver I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_max_active Ns = Ns Sy 2 Pq int
Maximum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_removal_min_active Ns = Ns Sy 1 Pq int
Minimum removal I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_max_active Ns = Ns Sy 2 Pq int
Maximum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_scrub_min_active Ns = Ns Sy 1 Pq int
Minimum scrub I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_max_active Ns = Ns Sy 10 Pq int
Maximum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_read_min_active Ns = Ns Sy 10 Pq int
Minimum synchronous read I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_max_active Ns = Ns Sy 10 Pq int
Maximum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_sync_write_min_active Ns = Ns Sy 10 Pq int
Minimum synchronous write I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_max_active Ns = Ns Sy 2 Pq int
Maximum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_trim_min_active Ns = Ns Sy 1 Pq int
Minimum trim/discard I/O operations active to each device.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_delay Ns = Ns Sy 5 Pq int
For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
the number of concurrently-active I/O operations is limited to
.Sy zfs_*_min_active ,
unless the vdev is "idle".
When there are no interactive I/O operations active (synchronous or otherwise),
and
.Sy zfs_vdev_nia_delay
operations have completed since the last interactive operation,
then the vdev is considered to be "idle",
and the number of concurrently-active non-interactive operations is increased to
.Sy zfs_*_max_active .
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_nia_credit Ns = Ns Sy 5 Pq int
Some HDDs tend to prioritize sequential I/O so strongly that concurrent
random I/O latency reaches several seconds.
On some HDDs this happens even if sequential I/O operations
are submitted one at a time, and so setting
.Sy zfs_*_max_active Ns = Sy 1
does not help.
To prevent non-interactive I/O, like scrub,
from monopolizing the device, no more than
.Sy zfs_vdev_nia_credit operations can be sent
while there are outstanding incomplete interactive operations.
This enforced wait ensures the HDD services the interactive I/O
within a reasonable amount of time.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_queue_depth_pct Ns = Ns Sy 1000 Ns % Pq int
Maximum number of queued allocations per top-level vdev expressed as
a percentage of
.Sy zfs_vdev_async_write_max_active ,
which allows the system to detect devices that are more capable
of handling allocations and to allocate more blocks to those devices.
This allows for dynamic allocation distribution when devices are imbalanced,
as fuller devices will tend to be slower than empty devices.
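.Pp
For example, with the default of
.Sy 1000%
and
.Sy zfs_vdev_async_write_max_active Ns = Ns Sy 30 ,
up to
.Em 300
allocations may be queued per top-level vdev.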
.Pp
Also see
.Sy zio_dva_throttle_enabled .
.
.It Sy zfs_expire_snapshot Ns = Ns Sy 300 Ns s Pq int
Time before expiring
.Pa .zfs/snapshot .
.
.It Sy zfs_admin_snapshot Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow the creation, removal, or renaming of entries in the
.Sy .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled, this functionality works both locally and over NFS exports
which have the
.Em no_root_squash
option set.
.
.It Sy zfs_flags Ns = Ns Sy 0 Pq int
Set additional debugging flags.
The following flags may be bitwise-ored together:
.TS
box;
lbz r l l .
Value Symbolic Name Description
_
1 ZFS_DEBUG_DPRINTF Enable dprintf entries in the debug log.
* 2 ZFS_DEBUG_DBUF_VERIFY Enable extra dbuf verifications.
* 4 ZFS_DEBUG_DNODE_VERIFY Enable extra dnode verifications.
8 ZFS_DEBUG_SNAPNAMES Enable snapshot name verification.
16 ZFS_DEBUG_MODIFY Check for illegally modified ARC buffers.
64 ZFS_DEBUG_ZIO_FREE Enable verification of block frees.
128 ZFS_DEBUG_HISTOGRAM_VERIFY Enable extra spacemap histogram verifications.
256 ZFS_DEBUG_METASLAB_VERIFY Verify space accounting on disk matches in-memory \fBrange_trees\fP.
512 ZFS_DEBUG_SET_ERROR Enable \fBSET_ERROR\fP and dprintf entries in the debug log.
1024 ZFS_DEBUG_INDIRECT_REMAP Verify split blocks created by device removal.
2048 ZFS_DEBUG_TRIM Verify TRIM ranges are always within the allocatable range tree.
4096 ZFS_DEBUG_LOG_SPACEMAP Verify that the log summary is consistent with the spacemap log
and enable \fBzfs_dbgmsgs\fP for metaslab loading and flushing.
.TE
.Sy \& * No Requires debug build.
.
.It Sy zfs_free_leak_on_eio Ns = Ns Sy 0 Ns | Ns 1 Pq int
If destroy encounters an
.Sy EIO
while reading metadata (e.g. indirect blocks),
space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled",
as it is unable to make forward progress.
While in this stalled state, all remaining space to free
from the error-encountering filesystem is "temporarily leaked".
Set this flag to cause it to ignore the
.Sy EIO ,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.
.Pp
The default "stalling" behavior is useful if the storage partially
fails (i.e. some but not all I/O operations fail), and then later recovers.
In this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks.
Note, however, that this case is actually fairly rare.
.Pp
Typically pools either
.Bl -enum -compact -offset 4n -width "1."
.It
fail completely (but perhaps temporarily,
e.g. due to a top-level vdev going offline), or
.It
have localized, permanent errors (e.g. disk returns the wrong data
due to bit flip or firmware bug).
.El
In the former case, this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless.
In the latter, because the error is permanent, the best we can do
is leak the minimum amount of space,
which is what setting this flag will do.
It is therefore reasonable for this flag to normally be set,
but we chose the more conservative approach of not setting it,
so that there is no possibility of
leaking space in the "partial temporary" failure case.
.
.It Sy zfs_free_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq int
During a
.Nm zfs Cm destroy
operation using the
.Sy async_destroy
feature,
a minimum of this much time will be spent working on freeing blocks per TXG.
.
.It Sy zfs_obsolete_min_time_ms Ns = Ns Sy 500 Ns ms Pq int
Similar to
.Sy zfs_free_min_time_ms ,
but for cleanup of old indirection records for removed vdevs.
.
.It Sy zfs_immediate_write_sz Ns = Ns Sy 32768 Ns B Po 32kB Pc Pq long
Largest data block to write to the ZIL.
Larger blocks will be treated as if the dataset being written to had the
.Sy logbias Ns = Ns Sy throughput
property set.
.
.It Sy zfs_initialize_value Ns = Ns Sy 16045690984833335022 Po 0xDEADBEEFDEADBEEE Pc Pq ulong
Pattern written to vdev free space by
.Xr zpool-initialize 8 .
.
.It Sy zfs_initialize_chunk_size Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq ulong
Size of writes used by
.Xr zpool-initialize 8 .
This option is used by the test suite.
.
.It Sy zfs_livelist_max_entries Ns = Ns Sy 500000 Po 5*10^5 Pc Pq ulong
The threshold size (in block pointers) at which we create a new sub-livelist.
Larger sublists are more costly from a memory perspective but the fewer
sublists there are, the lower the cost of insertion.
.
.It Sy zfs_livelist_min_percent_shared Ns = Ns Sy 75 Ns % Pq int
If the amount of shared space between a snapshot and its clone drops below
this threshold, the clone turns off the livelist and reverts to the old
deletion method.
This is in place because livelists no longer give us a benefit
once a clone has been overwritten enough.
.
.It Sy zfs_livelist_condense_new_alloc Ns = Ns Sy 0 Pq int
Incremented each time an extra ALLOC blkptr is added to a livelist entry while
it is being condensed.
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_sync .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_sync_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the synctask -
.Fn spa_livelist_condense_sync .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_livelist_condense_zthr_cancel Ns = Ns Sy 0 Pq int
Incremented each time livelist condensing is canceled while in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to track race conditions.
.
.It Sy zfs_livelist_condense_zthr_pause Ns = Ns Sy 0 Ns | Ns 1 Pq int
When set, the livelist condense process pauses indefinitely before
executing the open context condensing work in
.Fn spa_livelist_condense_cb .
This option is used by the test suite to trigger race conditions.
.
.It Sy zfs_lua_max_instrlimit Ns = Ns Sy 100000000 Po 10^8 Pc Pq ulong
The maximum execution time limit that can be set for a ZFS channel program,
specified as a number of Lua instructions.
.
.It Sy zfs_lua_max_memlimit Ns = Ns Sy 104857600 Po 100MB Pc Pq ulong
The maximum memory limit that can be set for a ZFS channel program, specified
in bytes.
.
.It Sy zfs_max_dataset_nesting Ns = Ns Sy 50 Pq int
The maximum depth of nested datasets.
This value can be tuned temporarily to
fix existing datasets that exceed the predefined limit.
.
.It Sy zfs_max_log_walking Ns = Ns Sy 5 Pq ulong
The number of past TXGs that the flushing algorithm of the log spacemap
feature uses to estimate incoming log blocks.
.
.It Sy zfs_max_logsm_summary_length Ns = Ns Sy 10 Pq ulong
Maximum number of rows allowed in the summary of the spacemap log.
.
.It Sy zfs_max_recordsize Ns = Ns Sy 1048576 Po 1MB Pc Pq int
We currently support block sizes from
.Em 512B No to Em 16MB .
The benefits of larger blocks, and thus larger I/O,
need to be weighed against the cost of COWing a giant block to modify one byte.
Additionally, very large blocks can have an impact on I/O latency,
and also potentially on the memory allocator.
Therefore, we do not allow the recordsize to be set larger than this tunable.
Larger blocks can be created by changing it,
and pools with larger blocks can always be imported and used,
regardless of this setting.
.
.It Sy zfs_allow_redacted_dataset_mount Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow datasets received with redacted send/receive to be mounted.
Normally disabled because these datasets may be missing key data.
.
.It Sy zfs_min_metaslabs_to_flush Ns = Ns Sy 1 Pq ulong
Minimum number of metaslabs to flush per dirty TXG.
.
.It Sy zfs_metaslab_fragmentation_threshold Ns = Ns Sy 70 Ns % Pq int
Allow metaslabs to keep their active state as long as their fragmentation
percentage is no more than this value.
An active metaslab that exceeds this threshold
will no longer keep its active status allowing better metaslabs to be selected.
.
.It Sy zfs_mg_fragmentation_threshold Ns = Ns Sy 95 Ns % Pq int
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value.
If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.
.It Sy zfs_mg_noalloc_threshold Ns = Ns Sy 0 Ns % Pq int
Defines a threshold at which metaslab groups should be eligible for allocations.
The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold.
Once all groups have reached the threshold, all groups are allowed to accept
allocations.
The default value of
.Sy 0
disables the feature and causes all metaslab groups to be eligible for allocations.
.Pp
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old
.Sy zfs_mg_alloc_failures
facility.
.
.It Sy zfs_ddt_data_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place DDT data into the special allocation class.
.
.It Sy zfs_user_indirect_is_special Ns = Ns Sy 1 Ns | Ns 0 Pq int
If enabled, ZFS will place user data indirect blocks
into the special allocation class.
.
.It Sy zfs_multihost_history Ns = Ns Sy 0 Pq int
Historical statistics for this many latest multihost updates will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /multihost .
.
.It Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq ulong
Used to control the frequency of multihost writes which are performed when the
.Sy multihost
pool property is on.
This is one of the factors used to determine the
length of the activity check during import.
.Pp
The multihost write period is
.Sy zfs_multihost_interval / leaf-vdevs .
On average a multihost write will be issued for each leaf vdev
every
.Sy zfs_multihost_interval
milliseconds.
In practice, the observed period can vary with the I/O load
and this observed value is the delay which is stored in the uberblock.
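.Pp
For example, with the default interval of
.Em 1s
and a pool of
.Em 10
leaf vdevs, a multihost write is issued to some leaf vdev roughly every
.Em 100ms ,
while each individual leaf vdev is written about once per second.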
.
.It Sy zfs_multihost_import_intervals Ns = Ns Sy 20 Pq uint
Used to control the duration of the activity test on import.
Smaller values of
.Sy zfs_multihost_import_intervals
will reduce the import time but increase
the risk of failing to detect an active pool.
The total activity check time is never allowed to drop below one second.
.Pp
On import the activity check waits a minimum amount of time determined by
.Sy zfs_multihost_interval * zfs_multihost_import_intervals ,
or the same product computed on the host which last had the pool imported,
whichever is greater.
The activity check time may be further extended if the value of MMP
delay found in the best uberblock indicates actual multihost updates happened
at longer intervals than
.Sy zfs_multihost_interval .
A minimum of
.Em 100ms
is enforced.
.Pp
.Sy 0 No is equivalent to Sy 1 .
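.Pp
For example, with the defaults of
.Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms
and
.Sy zfs_multihost_import_intervals Ns = Ns Sy 20 ,
the activity check on import waits at least
.Em 20s
before the pool is considered inactive.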
.
.It Sy zfs_multihost_fail_intervals Ns = Ns Sy 10 Pq uint
Controls the behavior of the pool when multihost write failures or delays are
detected.
.Pp
When
.Sy 0 ,
multihost write failures or delays are ignored.
The failures will still be reported to the ZED, which, depending on
its configuration, may take action such as suspending the pool or offlining a
device.
.Pp
Otherwise, the pool will be suspended if
.Sy zfs_multihost_fail_intervals * zfs_multihost_interval
milliseconds pass without a successful MMP write.
This guarantees the activity test will see MMP writes if the pool is imported.
.Sy 1 No is equivalent to Sy 2 ;
this is necessary to prevent the pool from being suspended
due to normal, small I/O latency variations.
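.Pp
For example, with the defaults of
.Sy 10
and
.Sy zfs_multihost_interval Ns = Ns Sy 1000 Ns ms ,
the pool is suspended if
.Em 10s
pass without a successful MMP write.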
.
.It Sy zfs_no_scrub_io Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable scrub I/O.
This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.
.It Sy zfs_no_scrub_prefetch Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to disable block prefetching for scrubs.
.
.It Sy zfs_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable cache flush operations on disks when writing.
Setting this will cause pool corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zfs_nopwrite_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Allow no-operation writes.
The occurrence of nopwrites will further depend on other pool properties
.Pq i.a. the checksumming and compression algorithms .
.
.It Sy zfs_dmu_offset_next_sync Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enable forcing TXG sync to find holes.
When enabled forces ZFS to act like prior versions when
.Sy SEEK_HOLE No or Sy SEEK_DATA
flags are used, which, when a dnode is dirty,
causes TXGs to be synced so that this data can be found.
.
.It Sy zfs_pd_bytes_max Ns = Ns Sy 52428800 Ns B Po 50MB Pc Pq int
The number of bytes which should be prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_traverse_indirect_prefetch_limit Ns = Ns Sy 32 Pq int
The number of blocks pointed to by an indirect (non-L0) block which should be
prefetched during a pool traversal, like
.Nm zfs Cm send
or other data crawling operations.
.
.It Sy zfs_per_txg_dirty_frees_percent Ns = Ns Sy 5 Ns % Pq ulong
Control percentage of dirtied indirect blocks from frees allowed into one TXG.
After this threshold is crossed, additional frees will wait until the next TXG.
.Sy 0 No disables this throttle.
.
.It Sy zfs_prefetch_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable predictive prefetch.
Note that it leaves "prescient" prefetch (used by, e.g.,
.Nm zfs Cm send )
intact.
Unlike predictive prefetch, prescient prefetch never issues I/O
that ends up not being needed, so it can't hurt performance.
.
.It Sy zfs_qat_checksum_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for SHA256 checksums.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_compress_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for gzip compression.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_qat_encrypt_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable QAT hardware acceleration for AES-GCM encryption.
May be unset after the ZFS modules have been loaded to initialize the QAT
hardware as long as support is compiled in and the QAT driver is present.
.
.It Sy zfs_vnops_read_chunk_size Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq long
Bytes to read per chunk.
.
.It Sy zfs_read_history Ns = Ns Sy 0 Pq int
Historical statistics for this many latest reads will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /reads .
.
.It Sy zfs_read_history_hits Ns = Ns Sy 0 Ns | Ns 1 Pq int
Include cache hits in read history.
.
.It Sy zfs_rebuild_max_segment Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq ulong
Maximum read segment size to issue when sequentially resilvering a
top-level vdev.
.
.It Sy zfs_rebuild_scrub_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Automatically start a pool scrub when the last active sequential resilver
completes in order to verify the checksums of all blocks which have been
resilvered.
This is enabled by default and strongly recommended.
.
.It Sy zfs_rebuild_vdev_limit Ns = Ns Sy 33554432 Ns B Po 32MB Pc Pq ulong
Maximum amount of I/O that can be concurrently issued for a sequential
resilver per leaf device, given in bytes.
.
.It Sy zfs_reconstruct_indirect_combinations_max Ns = Ns Sy 4096 Pq int
If an indirect split block contains more than this many possible unique
combinations when being reconstructed, consider it too computationally
expensive to check them all.
Instead, try at most this many randomly selected
combinations each time the block is accessed.
This allows all segment copies to participate fairly
in the reconstruction when all combinations
cannot be checked and prevents repeated use of one bad copy.
.
.It Sy zfs_recover Ns = Ns Sy 0 Ns | Ns 1 Pq int
Set to attempt to recover from fatal errors.
This should only be used as a last resort,
as it typically results in leaked space, or worse.
.
.It Sy zfs_removal_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore hard IO errors during device removal.
When set, if a device encounters a hard IO error during the removal process
the removal will not be cancelled.
This can result in a normally recoverable block becoming permanently damaged
and is hence not recommended.
This should only be used as a last resort when the
pool cannot be returned to a healthy state prior to removing the device.
.
.It Sy zfs_removal_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
This is used by the test suite so that it can ensure that certain actions
happen while in the middle of a removal.
.
.It Sy zfs_remove_max_segment Ns = Ns Sy 16777216 Ns B Po 16MB Pc Pq int
The largest contiguous segment that we will attempt to allocate when removing
a device.
If there is a performance problem with attempting to allocate large blocks,
consider decreasing this.
The default value is also the maximum.
.
.It Sy zfs_resilver_disable_defer Ns = Ns Sy 0 Ns | Ns 1 Pq int
Ignore the
.Sy resilver_defer
feature, causing an operation that would start a resilver to
immediately restart the one in progress.
.
.It Sy zfs_resilver_min_time_ms Ns = Ns Sy 3000 Ns ms Po 3s Pc Pq int
Resilvers are processed by the sync thread.
While resilvering, it will spend at least this much time
working on a resilver between TXG flushes.
.
.It Sy zfs_scan_ignore_errors Ns = Ns Sy 0 Ns | Ns 1 Pq int
If set, remove the DTL (dirty time list) upon completion of a pool scan (scrub),
even if there were unrepairable errors.
Intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.
.It Sy zfs_scrub_min_time_ms Ns = Ns Sy 1000 Ns ms Po 1s Pc Pq int
Scrubs are processed by the sync thread.
While scrubbing, it will spend at least this much time
working on a scrub between TXG flushes.
.
.It Sy zfs_scan_checkpoint_intval Ns = Ns Sy 7200 Ns s Po 2h Pc Pq int
To preserve progress across reboots, the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/O to disk.
The frequency of this flushing is determined by this tunable.
.
.It Sy zfs_scan_fill_weight Ns = Ns Sy 3 Pq int
This tunable affects how scrub and resilver I/O segments are ordered.
A higher number indicates that we care more about how filled in a segment is,
while a lower number indicates we care more about the size of the extent without
considering the gaps within a segment.
This value is only tunable upon module insertion.
Changing the value afterwards will have no effect on scrub or resilver performance.
.
.It Sy zfs_scan_issue_strategy Ns = Ns Sy 0 Pq int
Determines the order that data will be verified while scrubbing or resilvering:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
Data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing
.Pq see Sy zfs_scan_mem_lim_fact .
This may improve scrub performance if the pool's data is very fragmented.
.It Sy 2
The largest mostly-contiguous chunk of found data will be verified first.
By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size.
.It Sy 0
.No Use strategy Sy 1 No during normal verification
.No and strategy Sy 2 No while taking a checkpoint.
.El
.
.It Sy zfs_scan_legacy Ns = Ns Sy 0 Ns | Ns 1 Pq int
If unset, indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O.
Otherwise indicates that the legacy algorithm will be used,
where I/O is initiated as soon as it is discovered.
Unsetting will not affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_max_ext_gap Ns = Ns Sy 2097152 Ns B Po 2MB Pc Pq int
Sets the largest gap in bytes between scrub/resilver I/O operations
that will still be considered sequential for sorting purposes.
Changing this value will not
affect scrubs or resilvers that are already in progress.
.
.It Sy zfs_scan_mem_lim_fact Ns = Ns Sy 20 Ns ^-1 Pq int
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O.
This is done until we get below the soft limit.
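.Pp
For example, on a system with
.Em 64GB
of RAM, the default of
.Sy 20 Ns ^-1
yields a hard limit of roughly
.Em 3.2GB
of sorting memory; combined with the default
.Sy zfs_scan_mem_lim_soft_fact
of
.Sy 20 Ns ^-1 ,
the soft limit is roughly
.Em 160MB .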
.
.It Sy zfs_scan_mem_lim_soft_fact Ns = Ns Sy 20 Ns ^-1 Pq int
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm.
When we cross this limit from below no action is taken.
When we cross this limit from above it is because we are issuing verification I/O.
In this case (unless the metadata scan is done) we stop issuing verification I/O
and start scanning metadata again until we get to the hard limit.
.
.It Sy zfs_scan_strict_mem_lim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Enforce tight memory limits on pool scans when a sequential scan is in progress.
When disabled, the memory limit may be exceeded by fast disks.
.
.It Sy zfs_scan_suspend_progress Ns = Ns Sy 0 Ns | Ns 1 Pq int
Freezes a scrub/resilver in progress without actually pausing it.
Intended for testing/debugging.
.
.It Sy zfs_scan_vdev_limit Ns = Ns Sy 4194304 Ns B Po 4MB Pc Pq int
Maximum amount of data that can be concurrently issued at once for scrubs and
resilvers per leaf device, given in bytes.
.
.It Sy zfs_send_corrupt_data Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow sending of corrupt data (ignore read/checksum errors when sending).
.
.It Sy zfs_send_unmodified_spill_blocks Ns = Ns Sy 1 Ns | Ns 0 Pq int
Include unmodified spill blocks in the send stream.
Under certain circumstances, previous versions of ZFS could incorrectly
remove the spill block from an existing object.
Including unmodified copies of the spill blocks creates a backwards-compatible
stream which will recreate a spill block if it was incorrectly removed.
.
.It Sy zfs_send_no_prefetch_queue_ff Ns = Ns Sy 20 Ns ^-1 Pq int
The fill fraction of the
.Nm zfs Cm send
internal queues.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_no_prefetch_queue_length Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq int
The maximum number of bytes allowed in
.Nm zfs Cm send Ns 's
internal queues.
.
.It Sy zfs_send_queue_ff Ns = Ns Sy 20 Ns ^-1 Pq int
The fill fraction of the
.Nm zfs Cm send
prefetch queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_send_queue_length Ns = Ns Sy 16777216 Ns B Po 16MB Pc Pq int
The maximum number of bytes allowed that will be prefetched by
.Nm zfs Cm send .
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_queue_ff Ns = Ns Sy 20 Ns ^-1 Pq int
The fill fraction of the
.Nm zfs Cm receive
queue.
The fill fraction controls the timing with which internal threads are woken up.
.
.It Sy zfs_recv_queue_length Ns = Ns Sy 16777216 Ns B Po 16MB Pc Pq int
The maximum number of bytes allowed in the
.Nm zfs Cm receive
queue.
This value must be at least twice the maximum block size in use.
.
.It Sy zfs_recv_write_batch_size Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq int
The maximum amount of data, in bytes, that
.Nm zfs Cm receive
will write in one DMU transaction.
This is the uncompressed size, even when receiving a compressed send stream.
This setting will not reduce the write size below a single block.
Capped at a maximum of
.Sy 32MB .
.
.It Sy zfs_override_estimate_recordsize Ns = Ns Sy 0 Ns | Ns 1 Pq ulong
Setting this variable overrides the default logic for estimating block
sizes when doing a
.Nm zfs Cm send .
The default heuristic is that the average block size
will be the current recordsize.
Override this value if most data in your dataset is not of that size
and you require accurate zfs send size estimates.
.
.It Sy zfs_sync_pass_deferred_free Ns = Ns Sy 2 Pq int
Flushing of data to disk is done in passes.
Defer frees starting in this pass.
.
.It Sy zfs_spa_discard_memory_limit Ns = Ns Sy 16777216 Ns B Po 16MB Pc Pq int
Maximum memory used for prefetching a checkpoint's space map on each
vdev while discarding the checkpoint.
.
.It Sy zfs_special_class_metadata_reserve_pct Ns = Ns Sy 25 Ns % Pq int
Only allow small data blocks to be allocated on the special and dedup vdev
types when the available free space percentage on these vdevs exceeds this value.
This ensures reserved space is available for pool metadata as the
special vdevs approach capacity.
.
.It Sy zfs_sync_pass_dont_compress Ns = Ns Sy 8 Pq int
Starting in this sync pass, disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes,
so this has no effect.
.Pp
The original intent was that disabling compression would help the sync passes
to converge.
However, in practice, disabling compression increases
the average number of sync passes, because when we turn compression off,
many blocks' sizes will change, and thus we have to re-allocate
(not overwrite) them.
It also increases the number of
.Em 128kB
allocations (e.g. for indirect blocks and spacemaps)
because these will not be compressed.
The
.Em 128kB
allocations are especially detrimental to performance
on highly fragmented systems, which may have very few free segments of this size,
and may need to load new metaslabs to satisfy these allocations.
.
.It Sy zfs_sync_pass_rewrite Ns = Ns Sy 2 Pq int
Rewrite new block pointers starting in this pass.
.
.It Sy zfs_sync_taskq_batch_pct Ns = Ns Sy 75 Ns % Pq int
This controls the number of threads used by
.Sy dp_sync_taskq .
The default value of
.Sy 75%
will create a maximum of one thread per CPU.
.
.It Sy zfs_trim_extent_bytes_max Ns = Ns Sy 134217728 Ns B Po 128MB Pc Pq uint
Maximum size of TRIM command.
Larger ranges will be split into chunks no larger than this value before issuing.
.
.It Sy zfs_trim_extent_bytes_min Ns = Ns Sy 32768 Ns B Po 32kB Pc Pq uint
Minimum size of TRIM commands.
TRIM ranges smaller than this will be skipped,
unless they're part of a larger range which was chunked.
This is done because it's common for these small TRIMs
to negatively impact overall performance.
.
.It Sy zfs_trim_metaslab_skip Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Skip uninitialized metaslabs during the TRIM process.
This option is useful for pools constructed from large thinly-provisioned devices
where TRIM operations are slow.
As a pool ages, an increasing fraction of the pool's metaslabs
will be initialized, progressively degrading the usefulness of this option.
This setting is stored when starting a manual TRIM and will
persist for the duration of the requested TRIM.
.
.It Sy zfs_trim_queue_limit Ns = Ns Sy 10 Pq uint
Maximum number of queued TRIMs outstanding per leaf vdev.
The number of concurrent TRIM commands issued to the device is controlled by
.Sy zfs_vdev_trim_min_active No and Sy zfs_vdev_trim_max_active .
.
.It Sy zfs_trim_txg_batch Ns = Ns Sy 32 Pq uint
The number of transaction groups' worth of frees which should be aggregated
before TRIM operations are issued to the device.
This setting represents a trade-off between issuing larger,
more efficient TRIM operations and the delay
before the recently trimmed space is available for use by the device.
.Pp
Increasing this value will allow frees to be aggregated for a longer time.
This will result in larger TRIM operations and potentially increased memory usage.
Decreasing this value will have the opposite effect.
The default of
.Sy 32
was determined to be a reasonable compromise.
.
.It Sy zfs_txg_history Ns = Ns Sy 0 Pq int
Historical statistics for this many latest TXGs will be available in
.Pa /proc/spl/kstat/zfs/ Ns Ao Ar pool Ac Ns Pa /TXGs .
.
.It Sy zfs_txg_timeout Ns = Ns Sy 5 Ns s Pq int
Flush dirty data to disk at least every this many seconds (maximum TXG duration).
.
.It Sy zfs_vdev_aggregate_trim Ns = Ns Sy 0 Ns | Ns 1 Pq int
Allow TRIM I/Os to be aggregated.
This is normally not helpful because the extents to be trimmed
will already have been aggregated by the metaslab.
This option is provided for debugging and performance analysis.
.
.It Sy zfs_vdev_aggregation_limit Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq int
Max vdev I/O aggregation size.
.
.It Sy zfs_vdev_aggregation_limit_non_rotating Ns = Ns Sy 131072 Ns B Po 128kB Pc Pq int
Max vdev I/O aggregation size for non-rotating media.
.
.It Sy zfs_vdev_cache_bshift Ns = Ns Sy 16 Po 64kB Pc Pq int
Shift size to inflate reads to.
.
.It Sy zfs_vdev_cache_max Ns = Ns Sy 16384 Ns B Po 16kB Pc Pq int
Inflate reads smaller than this value to meet the
.Sy zfs_vdev_cache_bshift
size
.Pq default Sy 64kB .
.
.It Sy zfs_vdev_cache_size Ns = Ns Sy 0 Pq int
Total size of the per-disk cache in bytes.
.Pp
Currently this feature is disabled, as it has been found to not be helpful
for performance and in some cases harmful.
.
.It Sy zfs_vdev_mirror_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation,
for the purpose of selecting the least busy mirror member,
when an I/O operation immediately follows its predecessor on rotational vdevs.
.
.It Sy zfs_vdev_mirror_rotating_seek_inc Ns = Ns Sy 5 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation
lacks locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that are not immediately following the previous
operation are incremented by half.
.
.It Sy zfs_vdev_mirror_rotating_seek_offset Ns = Ns Sy 1048576 Ns B Po 1MB Pc Pq int
The maximum distance for the last queued I/O operation in which
the balancing algorithm considers an operation to have locality.
.No See Sx ZFS I/O SCHEDULER .
.
.It Sy zfs_vdev_mirror_non_rotating_inc Ns = Ns Sy 0 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/O operations do not immediately follow one another.
.
.It Sy zfs_vdev_mirror_non_rotating_seek_inc Ns = Ns Sy 1 Pq int
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O operation lacks
locality as defined by
.Sy zfs_vdev_mirror_rotating_seek_offset .
Operations within this window that are not immediately following the previous
operation are incremented by half.
.
.It Sy zfs_vdev_read_gap_limit Ns = Ns Sy 32768 Ns B Po 32kB Pc Pq int
Aggregate read I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_write_gap_limit Ns = Ns Sy 4096 Ns B Po 4kB Pc Pq int
Aggregate write I/O operations if the on-disk gap between them is within this
threshold.
.
.It Sy zfs_vdev_raidz_impl Ns = Ns Sy fastest Pq string
Select the raidz parity implementation to use.
.Pp
Variants that don't depend on CPU-specific features
may be selected on module load, as they are supported on all systems.
The remaining options may only be set after the module is loaded,
as they are available only if the implementations are compiled in
and supported on the running system.
.Pp
Once the module is loaded,
.Pa /sys/module/zfs/parameters/zfs_vdev_raidz_impl
will show the available options,
with the currently selected one enclosed in square brackets.
.Pp
.TS
lb l l .
fastest selected by built-in benchmark
original original implementation
scalar scalar implementation
sse2 SSE2 instruction set 64-bit x86
ssse3 SSSE3 instruction set 64-bit x86
avx2 AVX2 instruction set 64-bit x86
avx512f AVX512F instruction set 64-bit x86
avx512bw AVX512F & AVX512BW instruction sets 64-bit x86
aarch64_neon NEON Aarch64/64-bit ARMv8
aarch64_neonx2 NEON with more unrolling Aarch64/64-bit ARMv8
powerpc_altivec Altivec PowerPC
.TE
.
.It Sy zfs_vdev_scheduler Pq charp
.Sy DEPRECATED .
Prints warning to kernel log for compatibility.
.
.It Sy zfs_zevent_len_max Ns = Ns Sy 512 Pq int
Max event queue length.
Events in the queue can be viewed with
.Xr zpool-events 8 .
.
.It Sy zfs_zevent_retain_max Ns = Ns Sy 2000 Pq int
Maximum recent zevent records to retain for duplicate checking.
Setting this to
.Sy 0
disables duplicate detection.
.
.It Sy zfs_zevent_retain_expire_secs Ns = Ns Sy 900 Ns s Po 15min Pc Pq int
Lifespan for a recent ereport that was retained for duplicate checking.
.
.It Sy zfs_zil_clean_taskq_maxalloc Ns = Ns Sy 1048576 Pq int
The maximum number of taskq entries that are allowed to be cached.
When this limit is exceeded transaction records (itxs)
will be cleaned synchronously.
.
.It Sy zfs_zil_clean_taskq_minalloc Ns = Ns Sy 1024 Pq int
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.
.It Sy zfs_zil_clean_taskq_nthr_pct Ns = Ns Sy 100 Ns % Pq int
This controls the number of threads used by
.Sy dp_zil_clean_taskq .
The default value of
.Sy 100%
will create a maximum of one thread per CPU.
.
.It Sy zil_maxblocksize Ns = Ns Sy 131072 Ns B Po 128kB Pc Pq int
This sets the maximum block size used by the ZIL.
On very fragmented pools, lowering this
.Pq typically to Sy 36kB
can improve performance.
.
.It Sy zil_nocacheflush Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable the cache flush commands that are normally sent to disk by
the ZIL after an LWB write has completed.
Setting this will cause ZIL corruption on power loss
if a volatile out-of-order write cache is enabled.
.
.It Sy zil_replay_disable Ns = Ns Sy 0 Ns | Ns 1 Pq int
Disable intent logging replay.
Can be disabled for recovery from corrupted ZIL.
.
.It Sy zil_slog_bulk Ns = Ns Sy 786432 Ns B Po 768kB Pc Pq ulong
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
.
.It Sy zfs_embedded_slog_min_ms Ns = Ns Sy 64 Pq int
Usually, one metaslab from each normal-class vdev is dedicated for use by
the ZIL to log synchronous writes.
However, if there are fewer than
.Sy zfs_embedded_slog_min_ms
metaslabs in the vdev, this functionality is disabled.
This ensures that we don't set aside an unreasonable amount of space for the ZIL.
.
.It Sy zio_deadman_log_all Ns = Ns Sy 0 Ns | Ns 1 Pq int
If non-zero, the zio deadman will produce debugging messages
.Pq see Sy zfs_dbgmsg_enable
for all zios, rather than only for leaf zios possessing a vdev.
This is meant to be used by developers to gain
diagnostic information for hang conditions which don't involve a mutex
or other locking primitive: typically conditions in which a thread in
the zio pipeline is looping indefinitely.
.
.It Sy zio_slow_io_ms Ns = Ns Sy 30000 Ns ms Po 30s Pc Pq int
When an I/O operation takes more than this much time to complete,
it's marked as slow.
Each slow operation causes a delay zevent.
Slow I/O counters can be seen with
.Nm zpool Cm status Fl s .
.
.It Sy zio_dva_throttle_enabled Ns = Ns Sy 1 Ns | Ns 0 Pq int
Throttle block allocations in the I/O pipeline.
This allows for dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by
.Sy zfs_vdev_queue_depth_pct .
.
.It Sy zio_requeue_io_start_cut_in_line Ns = Ns Sy 0 Ns | Ns 1 Pq int
Prioritize requeued I/O.
.
.It Sy zio_taskq_batch_pct Ns = Ns Sy 80 Ns % Pq uint
Percentage of online CPUs which will run a worker thread for I/O.
These workers are responsible for I/O work such as compression and
checksum calculations.
A fractional number of CPUs will be rounded down.
.Pp
The default value of
.Sy 80%
was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance,
especially when slower compression and/or checksumming is enabled.
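.Pp
For example, on a system with
.Em 16
online CPUs, the default of
.Sy 80%
yields 12.8, which is rounded down to
.Em 12
worker threads.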
.
.It Sy zio_taskq_batch_tpq Ns = Ns Sy 0 Pq uint
Number of worker threads per taskq.
Lower values improve I/O ordering and CPU utilization,
while higher values reduce lock contention.
.Pp
If
.Sy 0 ,
generate a system-dependent value close to 6 threads per taskq.
.
.It Sy zvol_inhibit_dev Ns = Ns Sy 0 Ns | Ns 1 Pq uint
Do not create zvol device nodes.
This may slightly improve startup time on
systems with a very large number of zvols.
.
.It Sy zvol_major Ns = Ns Sy 230 Pq uint
Major number for zvol block devices.
.
.It Sy zvol_max_discard_blocks Ns = Ns Sy 16384 Pq ulong
Discard (TRIM) operations done on zvols will be done in batches of this
many blocks, where block size is determined by the
.Sy volblocksize
property of a zvol.
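.Pp
For example, with a
.Sy volblocksize
of
.Em 8kB ,
each batch covers at most
.Em 128MB
of the volume.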
.
.It Sy zvol_prefetch_bytes Ns = Ns Sy 131072 Ns B Po 128kB Pc Pq uint
When adding a zvol to the system, prefetch this many bytes
from the start and end of the volume.
Prefetching these regions of the volume is desirable,
because they are likely to be accessed immediately by
.Xr blkid 8
or the kernel partitioner.
.
.It Sy zvol_request_sync Ns = Ns Sy 0 Ns | Ns 1 Pq uint
When processing I/O requests for a zvol, submit them synchronously.
This effectively limits the queue depth to
.Em 1
for each I/O submitter.
When unset, requests are handled asynchronously by a thread pool.
The number of requests which can be handled concurrently is controlled by
.Sy zvol_threads .
.
.It Sy zvol_threads Ns = Ns Sy 32 Pq uint
Max number of threads which can handle zvol I/O requests concurrently.
.
.It Sy zvol_volmode Ns = Ns Sy 1 Pq uint
Defines the behaviour of zvol block devices when
.Sy volmode Ns = Ns Sy default :
.Bl -tag -compact -offset 4n -width "a"
.It Sy 1
.No equivalent to Sy full
.It Sy 2
.No equivalent to Sy dev
.It Sy 3
.No equivalent to Sy none
.El
.El
.
.Sh ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs to satisfy and complete I/O operations.
The scheduler determines when and in what order those operations are issued.
The scheduler divides operations into five I/O classes,
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver.
Each queue defines the minimum and maximum number of concurrent operations
that may be issued to the device.
In addition, the device has an aggregate maximum,
.Sy zfs_vdev_max_active .
Note that the sum of the per-queue minima must not exceed the aggregate maximum.
If the sum of the per-queue maxima exceeds the aggregate maximum,
then the number of active operations may reach
.Sy zfs_vdev_max_active ,
in which case no further operations will be issued,
regardless of whether all per-queue minima have been met.
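.Pp
For example, with the default queue settings documented above,
the per-queue minima sum to 24 (10 + 10 + 1 + 2 + 1)
and the per-queue maxima sum to 55 (10 + 10 + 3 + 30 + 2),
both well below the default
.Sy zfs_vdev_max_active
of
.Sy 1000 .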
.Pp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers.
Furthermore, physical devices typically have a limit
at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.Pp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied.
Once all are satisfied and the aggregate maximum has not been hit,
the scheduler looks for classes whose maximum has not been satisfied.
Iteration through the I/O classes is done in the order specified above.
No further operations are issued
if the aggregate maximum number of concurrent operations has been hit,
or if there are no operations queued for an I/O class that has not hit its maximum.
Every time an I/O operation is queued or an operation completes,
the scheduler looks for new operations to issue.
.Pp
In general, smaller
.Sy max_active Ns s
will lead to lower latency of synchronous operations.
Larger
.Sy max_active Ns s
may lead to higher overall throughput, depending on underlying storage.
.Pp
The ratio of the queues'
.Sy max_active Ns s
determines the balance of performance between reads, writes, and scrubs.
For example, increasing
.Sy zfs_vdev_scrub_max_active
will cause the scrub or resilver to complete more quickly,
but reads and writes to have higher latency and lower throughput.
.Pp
All I/O classes have a fixed maximum number of outstanding operations,
except for the async write class.
Asynchronous writes represent the data that is committed to stable storage
during the syncing stage for transaction groups.
Transaction groups enter the syncing state periodically,
so the number of queued async writes will quickly burst up
and then bleed down to zero.
Rather than servicing them as quickly as possible,
the I/O scheduler changes the maximum number of active async write operations
according to the amount of dirty data in the pool.
Since both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of concurrent operations also stabilizes the
response time of operations from other – and in particular synchronous – queues.
In broad strokes, the I/O scheduler will issue more concurrent operations
from the async write queue as there's more dirty data in the pool.
.
.Ss Async Writes
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points:
.Bd -literal
| o---------| <-- \fBzfs_vdev_async_write_max_active\fP
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- \fBzfs_vdev_async_write_min_active\fP
0|_______^______|_________|
0% | | 100% of \fBzfs_dirty_data_max\fP
| |
| `-- \fBzfs_vdev_async_write_active_max_dirty_percent\fP
`--------- \fBzfs_vdev_async_write_active_min_dirty_percent\fP
.Ed
.Pp
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum.
As that threshold is crossed, the number of concurrent operations issued
increases linearly to the maximum at the specified maximum percentage
of the dirty data allowed in the pool.
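.Pp
As a worked example, assume purely illustrative settings of
.Sy zfs_vdev_async_write_min_active Ns = Ns 2 ,
.Sy zfs_vdev_async_write_max_active Ns = Ns 10 ,
.Sy zfs_vdev_async_write_active_min_dirty_percent Ns = Ns 30 ,
and
.Sy zfs_vdev_async_write_active_max_dirty_percent Ns = Ns 60 .
With dirty data at 45% of
.Sy zfs_dirty_data_max ,
i.e. halfway up the slope, the scheduler issues about six concurrent
async write operations:
.Dl 2 + (10 - 2) * (45 - 30) / (60 - 30) = 6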
.Pp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between
.Sy zfs_vdev_async_write_active_min_dirty_percent
and
.Sy zfs_vdev_async_write_active_max_dirty_percent .
If it exceeds the maximum percentage,
this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle.
In this case, we must further throttle incoming writes,
as described in the next section.
.
.Sh ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.Pp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting.
This way the calculated delay time
is independent of the number of threads concurrently executing transactions.
.Pp
If we are the only waiter, wait relative to when the transaction started,
rather than the current time.
This credits the transaction for "time already served",
e.g. reading indirect blocks.
.Pp
The minimum time for a transaction to take is calculated as
.Dl min_time = min( Ns Sy zfs_delay_scale No * (dirty - min) / (max - dirty), 100ms)
.Pp
The delay has two degrees of freedom that can be adjusted via tunables.
The percentage of dirty data at which we start to delay is defined by
.Sy zfs_delay_min_dirty_percent .
This should typically be at or above
.Sy zfs_vdev_async_write_active_max_dirty_percent ,
so that we only start to delay after writing at full speed
has failed to keep up with the incoming write rate.
The scale of the curve is defined by
.Sy zfs_delay_scale .
Roughly speaking, this variable determines the amount of delay at the midpoint of the curve.
.Bd -literal
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| \fBzfs_delay_scale\fP ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note that, since the delay is added to the outstanding time remaining on the
most recent transaction, it is effectively the inverse of IOPS.
Here, the midpoint of
.Em 500us
translates to
.Em 2000 IOPS .
The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first three quarters of the curve yield relatively small differences
in the amount of delay.
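.Pp
As a worked example, with
.Sy zfs_delay_scale
set to 500,000 ns and the amount of dirty data sitting exactly midway between
the delay threshold and
.Sy zfs_dirty_data_max ,
the terms (dirty - min) and (max - dirty) are equal, so the formula above
yields
.Dl min_time = min(500,000 ns * 1, 100 ms) = 500 us
which is the 500 microsecond (2000 IOPS) midpoint discussed above.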
.Pp
The effects can be easier to understand when the amount of delay is
represented on a logarithmic scale:
.Bd -literal
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ \fBzfs_delay_scale\fP ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- \fBzfs_dirty_data_max\fP -> 100%
.Ed
.Pp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly.
The goal of a properly tuned system should be to keep the amount of dirty data
out of that range by first ensuring that the appropriate limits are set
for the I/O scheduler to reach optimal throughput on the back-end storage,
and then by changing the value of
.Sy zfs_delay_scale
to increase the steepness of the curve.
diff --git a/sys/contrib/openzfs/man/man7/zfsprops.7 b/sys/contrib/openzfs/man/man7/zfsprops.7
index 387e51b0b5ee..b753ec321ad0 100644
--- a/sys/contrib/openzfs/man/man7/zfsprops.7
+++ b/sys/contrib/openzfs/man/man7/zfsprops.7
@@ -1,2045 +1,2068 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2011, Pawel Jakub Dawidek <pjd@FreeBSD.org>
.\" Copyright (c) 2012, Glen Barber <gjb@FreeBSD.org>
.\" Copyright (c) 2012, Bryan Drewery <bdrewery@FreeBSD.org>
.\" Copyright (c) 2013, Steven Hartland <smh@FreeBSD.org>
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright (c) 2016 Nexenta Systems, Inc. All Rights Reserved.
.\" Copyright (c) 2014, Xin LI <delphij@FreeBSD.org>
.\" Copyright (c) 2014-2015, The FreeBSD Foundation, All Rights Reserved.
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\" Copyright (c) 2019, Kjeld Schouten-Lebbing
.\"
.Dd May 24, 2021
.Dt ZFSPROPS 7
.Os
.
.Sh NAME
.Nm zfsprops
.Nd native and user-defined properties of ZFS datasets
.
.Sh DESCRIPTION
Properties are divided into two types, native properties and user-defined
.Po or
.Qq user
.Pc
properties.
Native properties either export internal statistics or control ZFS behavior.
In addition, native properties are either editable or read-only.
User properties have no effect on ZFS behavior, but you can use them to annotate
datasets in a way that is meaningful in your environment.
For more information about user properties, see the
.Sx User Properties
section, below.
.
.Ss Native Properties
Every dataset has a set of properties that export statistics about the dataset
as well as control various behaviors.
Properties are inherited from the parent unless overridden by the child.
Some properties apply only to certain types of datasets
.Pq file systems, volumes, or snapshots .
.Pp
The values of numeric properties can be specified using human-readable suffixes
.Po for example,
.Sy k ,
.Sy KB ,
.Sy M ,
.Sy Gb ,
and so forth, up to
.Sy Z
for zettabyte
.Pc .
The following are all valid
.Pq and equal
specifications:
.Li 1536M, 1.5g, 1.50GB .
.Pp
The values of non-numeric properties are case sensitive and must be lowercase,
except for
.Sy mountpoint ,
.Sy sharenfs ,
and
.Sy sharesmb .
.Pp
The following native properties consist of read-only statistics about the
dataset.
These properties can be neither set, nor inherited.
Native properties apply to all dataset types unless otherwise noted.
.Bl -tag -width "usedbyrefreservation"
.It Sy available
The amount of space available to the dataset and all its children, assuming that
there is no other activity in the pool.
Because space is shared within a pool, availability can be limited by any number
of factors, including physical pool size, quotas, reservations, or other
datasets within the pool.
.Pp
This property can also be referred to by its shortened column name,
.Sy avail .
.It Sy compressratio
For non-snapshots, the compression ratio achieved for the
.Sy used
space of this dataset, expressed as a multiplier.
The
.Sy used
property includes descendant datasets, and, for clones, does not include the
space shared with the origin snapshot.
For snapshots, the
.Sy compressratio
is the same as the
.Sy refcompressratio
property.
Compression can be turned on by running:
.Nm zfs Cm set Sy compression Ns = Ns Sy on Ar dataset .
The default value is
.Sy off .
.It Sy createtxg
The transaction group (txg) in which the dataset was created.
Bookmarks have the same
.Sy createtxg
as the snapshot they are initially tied to.
This property is suitable for ordering a list of snapshots,
e.g. for incremental send and receive.
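.Pp
For example, a snapshot listing can be ordered by creation transaction group
with a command along the lines of the following, where
.Ar pool/fs
is a placeholder dataset name:
.Dl # zfs list -t snapshot -o name,createtxg -s createtxg pool/fs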
.It Sy creation
The time this dataset was created.
.It Sy clones
For snapshots, this property is a comma-separated list of filesystems or volumes
which are clones of this snapshot.
The clones'
.Sy origin
property is this snapshot.
If the
.Sy clones
property is not empty, then this snapshot can not be destroyed
.Po even with the
.Fl r
or
.Fl f
options
.Pc .
The roles of origin and clone can be swapped by promoting the clone with the
.Nm zfs Cm promote
command.
.It Sy defer_destroy
This property is
.Sy on
if the snapshot has been marked for deferred destroy by using the
.Nm zfs Cm destroy Fl d
command.
Otherwise, the property is
.Sy off .
.It Sy encryptionroot
For encrypted datasets, indicates where the dataset is currently inheriting its
encryption key from.
Loading or unloading a key for the
.Sy encryptionroot
will implicitly load / unload the key for any inheriting datasets (see
.Nm zfs Cm load-key
and
.Nm zfs Cm unload-key
for details).
Clones will always share an
encryption key with their origin.
See the
.Sx Encryption
section of
.Xr zfs-load-key 8
for details.
.It Sy filesystem_count
The total number of filesystems and volumes that exist under this location in
the dataset tree.
This value is only available when a
.Sy filesystem_limit
has been set somewhere in the tree under which the dataset resides.
.It Sy keystatus
Indicates if an encryption key is currently loaded into ZFS.
The possible values are
.Sy none ,
.Sy available ,
and
.Sy unavailable .
See
.Nm zfs Cm load-key
and
.Nm zfs Cm unload-key .
.It Sy guid
The 64-bit GUID of this dataset or bookmark, which does not change over its
entire lifetime.
When a snapshot is sent to another pool, the received snapshot has the same GUID.
Thus, the
.Sy guid
is suitable to identify a snapshot across pools.
.It Sy logicalreferenced
The amount of space that is
.Qq logically
accessible by this dataset.
See the
.Sy referenced
property.
The logical space ignores the effect of the
.Sy compression
and
.Sy copies
properties, giving a quantity closer to the amount of data that applications
see.
However, it does include space consumed by metadata.
.Pp
This property can also be referred to by its shortened column name,
.Sy lrefer .
.It Sy logicalused
The amount of space that is
.Qq logically
consumed by this dataset and all its descendents.
See the
.Sy used
property.
The logical space ignores the effect of the
.Sy compression
and
.Sy copies
properties, giving a quantity closer to the amount of data that applications
see.
However, it does include space consumed by metadata.
.Pp
This property can also be referred to by its shortened column name,
.Sy lused .
.It Sy mounted
For file systems, indicates whether the file system is currently mounted.
This property can be either
.Sy yes
or
.Sy no .
.It Sy objsetid
A unique identifier for this dataset within the pool.
Unlike the dataset's
.Sy guid , No the Sy objsetid
of a dataset is not transferred to other pools when the snapshot is copied
with a send/receive operation.
The
.Sy objsetid
can be reused (for a new dataset) after the dataset is deleted.
.It Sy origin
For cloned file systems or volumes, the snapshot from which the clone was
created.
See also the
.Sy clones
property.
.It Sy receive_resume_token
For filesystems or volumes which have saved partially-completed state from
.Nm zfs Cm receive Fl s ,
this opaque token can be provided to
.Nm zfs Cm send Fl t
to resume and complete the
.Nm zfs Cm receive .
.It Sy redact_snaps
For bookmarks, this is the list of snapshot guids the bookmark contains a redaction
list for.
For snapshots, this is the list of snapshot guids the snapshot is redacted with
respect to.
.It Sy referenced
The amount of data that is accessible by this dataset, which may or may not be
shared with other datasets in the pool.
When a snapshot or clone is created, it initially references the same amount of
space as the file system or snapshot it was created from, since its contents are
identical.
.Pp
This property can also be referred to by its shortened column name,
.Sy refer .
.It Sy refcompressratio
The compression ratio achieved for the
.Sy referenced
space of this dataset, expressed as a multiplier.
See also the
.Sy compressratio
property.
.It Sy snapshot_count
The total number of snapshots that exist under this location in the dataset
tree.
This value is only available when a
.Sy snapshot_limit
has been set somewhere in the tree under which the dataset resides.
.It Sy type
The type of dataset:
.Sy filesystem ,
.Sy volume ,
.Sy snapshot ,
or
.Sy bookmark .
.It Sy used
The amount of space consumed by this dataset and all its descendents.
This is the value that is checked against this dataset's quota and reservation.
The space used does not include this dataset's reservation, but does take into
account the reservations of any descendent datasets.
The amount of space that a dataset consumes from its parent, as well as the
amount of space that is freed if this dataset is recursively destroyed, is the
greater of its space used and its reservation.
.Pp
The used space of a snapshot
.Po see the
.Sx Snapshots
section of
.Xr zfsconcepts 7
.Pc
is space that is referenced exclusively by this snapshot.
If this snapshot is destroyed, the amount of
.Sy used
space will be freed.
Space that is shared by multiple snapshots isn't accounted for in this metric.
When a snapshot is destroyed, space that was previously shared with this
snapshot can become unique to snapshots adjacent to it, thus changing the used
space of those snapshots.
The used space of the latest snapshot can also be affected by changes in the
file system.
Note that the
.Sy used
space of a snapshot is a subset of the
.Sy written
space of the snapshot.
.Pp
The amount of space used, available, or referenced does not take into account
pending changes.
Pending changes are generally accounted for within a few seconds.
Committing a change to a disk using
.Xr fsync 2
or
.Sy O_SYNC
does not necessarily guarantee that the space usage information is updated
immediately.
.It Sy usedby*
The
.Sy usedby*
properties decompose the
.Sy used
properties into the various reasons that space is used.
Specifically,
.Sy used No =
.Sy usedbychildren No +
.Sy usedbydataset No +
.Sy usedbyrefreservation No +
.Sy usedbysnapshots .
These properties are only available for datasets created on
.Nm zpool
.Qo version 13 Qc
pools.
.It Sy usedbychildren
The amount of space used by children of this dataset, which would be freed if
all the dataset's children were destroyed.
.It Sy usedbydataset
The amount of space used by this dataset itself, which would be freed if the
dataset were destroyed
.Po after first removing any
.Sy refreservation
and destroying any necessary snapshots or descendents
.Pc .
.It Sy usedbyrefreservation
The amount of space used by a
.Sy refreservation
set on this dataset, which would be freed if the
.Sy refreservation
was removed.
.It Sy usedbysnapshots
The amount of space consumed by snapshots of this dataset.
In particular, it is the amount of space that would be freed if all of this
dataset's snapshots were destroyed.
Note that this is not simply the sum of the snapshots'
.Sy used
properties because space can be shared by multiple snapshots.
.It Sy userused Ns @ Ns Ar user
The amount of space consumed by the specified user in this dataset.
Space is charged to the owner of each file, as displayed by
.Nm ls Fl l .
The amount of space charged is displayed by
.Nm du No and Nm ls Fl s .
See the
.Nm zfs Cm userspace
command for more information.
.Pp
Unprivileged users can access only their own space usage.
The root user, or a user who has been granted the
.Sy userused
privilege with
.Nm zfs Cm allow ,
can access everyone's usage.
.Pp
The
.Sy userused Ns @ Ns Ar ...
properties are not displayed by
.Nm zfs Cm get Sy all .
The user's name must be appended after the
.Sy @
symbol, using one of the following forms:
.Bl -bullet -compact -offset 4n
.It
POSIX name
.Pq Qq joe
.It
POSIX numeric ID
.Pq Qq 789
.It
SID name
.Pq Qq joe.smith@mydomain
.It
SID numeric ID
.Pq Qq S-1-123-456-789
.El
.Pp
Files created on Linux always have POSIX owners.
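.Pp
For example, with a placeholder user
.Ar joe
and dataset
.Ar pool/home ,
the per-user accounting can be read for a single user or for all users:
.Dl # zfs get userused@joe pool/home
.Dl # zfs userspace pool/home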
.It Sy userobjused Ns @ Ns Ar user
The
.Sy userobjused
property is similar to
.Sy userused
but instead it counts the number of objects consumed by a user.
This property counts all objects allocated on behalf of the user;
it may differ from the results of system tools such as
.Nm df Fl i .
.Pp
When the property
.Sy xattr Ns = Ns Sy on
is set on a file system, additional objects will be created per-file to store
extended attributes.
These additional objects are reflected in the
.Sy userobjused
value and are counted against the user's
.Sy userobjquota .
When a file system is configured to use
.Sy xattr Ns = Ns Sy sa
no additional internal objects are normally required.
.It Sy userrefs
This property is set to the number of user holds on this snapshot.
User holds are set by using the
.Nm zfs Cm hold
command.
.It Sy groupused Ns @ Ns Ar group
The amount of space consumed by the specified group in this dataset.
Space is charged to the group of each file, as displayed by
.Nm ls Fl l .
See the
.Sy userused Ns @ Ns Ar user
property for more information.
.Pp
Unprivileged users can only access their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupused
privilege with
.Nm zfs Cm allow ,
can access all groups' usage.
.It Sy groupobjused Ns @ Ns Ar group
The number of objects consumed by the specified group in this dataset.
Multiple objects may be charged to the group for each file when extended
attributes are in use.
See the
.Sy userobjused Ns @ Ns Ar user
property for more information.
.Pp
Unprivileged users can only access their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupobjused
privilege with
.Nm zfs Cm allow ,
can access all groups' usage.
.It Sy projectused Ns @ Ns Ar project
The amount of space consumed by the specified project in this dataset.
The project is identified via the project identifier (ID), which is a
numerical attribute of an object.
An object can inherit the project ID from its parent object when it is
created, if the parent has the inherit-project-ID flag set; the flag can be
set and changed via
.Nm chattr Fl /+P
or
.Nm zfs project Fl s .
A privileged user can set and change an object's project ID at any time via
.Nm chattr Fl p
or
.Nm zfs project Fl s .
Space is charged to the project of each file, as displayed by
.Nm lsattr Fl p
or
.Nm zfs project .
See the
.Sy userused Ns @ Ns Ar user
property for more information.
.Pp
The root user, or a user who has been granted the
.Sy projectused
privilege with
.Nm zfs allow ,
can access all projects' usage.
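.Pp
For example, with a placeholder project ID of 100 and placeholder paths, a
directory tree can be assigned to a project and its usage queried afterwards:
.Dl # zfs project -p 100 -r -s /pool/fs/projects/alpha
.Dl # zfs get projectused@100 pool/fs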
.It Sy projectobjused Ns @ Ns Ar project
The
.Sy projectobjused
is similar to
.Sy projectused
but instead it counts the number of objects consumed by the project.
When the property
.Sy xattr Ns = Ns Sy on
is set on a file system, ZFS will create additional objects per-file to store
extended attributes.
These additional objects are reflected in the
.Sy projectobjused
value and are counted against the project's
.Sy projectobjquota .
When a filesystem is configured to use
.Sy xattr Ns = Ns Sy sa
no additional internal objects are required.
See the
.Sy userobjused Ns @ Ns Ar user
property for more information.
.Pp
The root user, or a user who has been granted the
.Sy projectobjused
privilege with
.Nm zfs allow ,
can access all projects' objects usage.
.It Sy volblocksize
For volumes, specifies the block size of the volume.
The
.Sy blocksize
cannot be changed once the volume has been written, so it should be set at
volume creation time.
The default
.Sy blocksize
for volumes is 8 Kbytes.
Any power of 2 from 512 bytes to 128 Kbytes is valid.
.Pp
This property can also be referred to by its shortened column name,
.Sy volblock .
.It Sy written
The amount of space
.Sy referenced
by this dataset that was written since the previous snapshot
.Pq i.e. that is not referenced by the previous snapshot .
.It Sy written Ns @ Ns Ar snapshot
The amount of
.Sy referenced
space written to this dataset since the specified snapshot.
This is the space that is referenced by this dataset but was not referenced by
the specified snapshot.
.Pp
The
.Ar snapshot
may be specified as a short snapshot name
.Pq just the part after the Sy @ ,
in which case it will be interpreted as a snapshot in the same filesystem as
this dataset.
The
.Ar snapshot
may be a full snapshot name
.Pq Ar filesystem Ns @ Ns Ar snapshot ,
which for clones may be a snapshot in the origin's filesystem
.Pq or the origin of the origin's filesystem, etc.
.El
.Pp
The following native properties can be used to change the behavior of a ZFS
dataset.
.Bl -tag -width ""
.It Xo
.Sy aclinherit Ns = Ns Sy discard Ns | Ns Sy noallow Ns | Ns
.Sy restricted Ns | Ns Sy passthrough Ns | Ns Sy passthrough-x
.Xc
Controls how ACEs are inherited when files and directories are created.
.Bl -tag -compact -offset 4n -width "passthrough-x"
.It Sy discard
does not inherit any ACEs.
.It Sy noallow
only inherits inheritable ACEs that specify
.Qq deny
permissions.
.It Sy restricted
default, removes the
.Sy write_acl
and
.Sy write_owner
permissions when the ACE is inherited.
.It Sy passthrough
inherits all inheritable ACEs without any modifications.
.It Sy passthrough-x
same meaning as
.Sy passthrough ,
except that the
.Sy owner@ , group@ , No and Sy everyone@
ACEs inherit the execute permission only if the file creation mode also requests
the execute bit.
.El
.Pp
When the property value is set to
.Sy passthrough ,
files are created with a mode determined by the inheritable ACEs.
If no inheritable ACEs exist that affect the mode, then the mode is set in
accordance to the requested mode from the application.
.Pp
The
.Sy aclinherit
property does not apply to POSIX ACLs.
.It Xo
.Sy aclmode Ns = Ns Sy discard Ns | Ns Sy groupmask Ns | Ns
.Sy passthrough Ns | Ns Sy restricted Ns
.Xc
Controls how an ACL is modified during chmod(2) and how inherited ACEs
are modified by the file creation mode:
.Bl -tag -compact -offset 4n -width "passthrough"
.It Sy discard
default, deletes all
.Sy ACEs
except for those representing
the mode of the file or directory requested by
.Xr chmod 2 .
.It Sy groupmask
reduces permissions granted in all
.Sy ALLOW
entries found in the
.Sy ACL
such that they are no greater than the group permissions specified by
.Xr chmod 2 .
.It Sy passthrough
indicates that no changes are made to the ACL other than creating or updating
the necessary ACL entries to represent the new mode of the file or directory.
.It Sy restricted
will cause the
.Xr chmod 2
operation to return an error when used on any file or directory which has
a non-trivial ACL whose entries can not be represented by a mode.
.Xr chmod 2
is required to change the set user ID, set group ID, or sticky bits on a file
or directory, as they do not have equivalent ACL entries.
In order to use
.Xr chmod 2
on a file or directory with a non-trivial ACL when
.Sy aclmode
is set to
.Sy restricted ,
you must first remove all ACL entries which do not represent the current mode.
.El
.It Sy acltype Ns = Ns Sy off Ns | Ns Sy nfsv4 Ns | Ns Sy posix
Controls whether ACLs are enabled and if so what type of ACL to use.
When this property is set to a type of ACL not supported by the current
platform, the behavior is the same as if it were set to
.Sy off .
.Bl -tag -compact -offset 4n -width "posixacl"
.It Sy off
default on Linux; when a file system has the
.Sy acltype
property set to off, ACLs are disabled.
.It Sy noacl
an alias for
.Sy off
.It Sy nfsv4
default on
.Fx ,
indicates that NFSv4-style ZFS ACLs should be used.
These ACLs can be managed with the
.Xr getfacl 1
and
.Xr setfacl 1
commands.
The
.Sy nfsv4
ZFS ACL type is not yet supported on Linux.
.It Sy posix
indicates POSIX ACLs should be used.
POSIX ACLs are specific to Linux and are not functional on other platforms.
POSIX ACLs are stored as an extended
attribute and therefore will not overwrite any existing NFSv4 ACLs which
may be set.
.It Sy posixacl
an alias for
.Sy posix
.El
.Pp
To obtain the best performance when setting
.Sy posix ,
users are strongly encouraged to set the
.Sy xattr Ns = Ns Sy sa
property.
This will result in the POSIX ACL being stored more efficiently on disk.
But as a consequence, all new extended attributes will only be
accessible from OpenZFS implementations which support the
.Sy xattr Ns = Ns Sy sa
property.
See the
.Sy xattr
property for more details.
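.Pp
For example, on a Linux system both properties can be set together, with
.Ar pool/fs
as a placeholder dataset name:
.Dl # zfs set acltype=posix xattr=sa pool/fs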
.It Sy atime Ns = Ns Sy on Ns | Ns Sy off
Controls whether the access time for files is updated when they are read.
Turning this property off avoids producing write traffic when reading files and
can result in significant performance gains, though it might confuse mailers
and other similar utilities.
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy atime
and
.Sy noatime
mount options.
The default value is
.Sy on .
See also
.Sy relatime
below.
.It Sy canmount Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy noauto
If this property is set to
.Sy off ,
the file system cannot be mounted, and is ignored by
.Nm zfs Cm mount Fl a .
Setting this property to
.Sy off
is similar to setting the
.Sy mountpoint
property to
.Sy none ,
except that the dataset still has a normal
.Sy mountpoint
property, which can be inherited.
Setting this property to
.Sy off
allows datasets to be used solely as a mechanism to inherit properties.
One example of setting
.Sy canmount Ns = Ns Sy off
is to have two datasets with the same
.Sy mountpoint ,
so that the children of both datasets appear in the same directory, but might
have different inherited characteristics.
.Pp
When set to
.Sy noauto ,
a dataset can only be mounted and unmounted explicitly.
The dataset is not mounted automatically when the dataset is created or
imported, nor is it mounted by the
.Nm zfs Cm mount Fl a
command or unmounted by the
.Nm zfs Cm unmount Fl a
command.
.Pp
This property is not inherited.
.It Xo
.Sy checksum Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy fletcher2 Ns | Ns
.Sy fletcher4 Ns | Ns Sy sha256 Ns | Ns Sy noparity Ns | Ns
.Sy sha512 Ns | Ns Sy skein Ns | Ns Sy edonr
.Xc
Controls the checksum used to verify data integrity.
The default value is
.Sy on ,
which automatically selects an appropriate algorithm
.Po currently,
.Sy fletcher4 ,
but this may change in future releases
.Pc .
The value
.Sy off
disables integrity checking on user data.
The value
.Sy noparity
not only disables integrity but also disables maintaining parity for user data.
This setting is used internally by a dump device residing on a RAID-Z pool and
should not be used by any other dataset.
Disabling checksums is
.Em NOT
a recommended practice.
.Pp
The
.Sy sha512 ,
.Sy skein ,
and
.Sy edonr
checksum algorithms require enabling the appropriate features on the pool.
.Fx
does not support the
.Sy edonr
algorithm.
.Pp
Please see
.Xr zpool-features 7
for more information on these algorithms.
.Pp
Changing this property affects only newly-written data.
.It Xo
.Sy compression Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy gzip Ns | Ns
.Sy gzip- Ns Ar N Ns | Ns Sy lz4 Ns | Ns Sy lzjb Ns | Ns Sy zle Ns | Ns Sy zstd Ns | Ns
.Sy zstd- Ns Ar N Ns | Ns Sy zstd-fast Ns | Ns Sy zstd-fast- Ns Ar N
.Xc
Controls the compression algorithm used for this dataset.
.Pp
Setting compression to
.Sy on
indicates that the current default compression algorithm should be used.
The default balances compression and decompression speed with compression
ratio, and is expected to work well on a wide variety of workloads.
Unlike all other settings for this property,
.Sy on
does not select a fixed compression type.
As new compression algorithms are added to ZFS and enabled on a pool, the
default compression algorithm may change.
The current default compression algorithm is either
.Sy lzjb
or, if the
.Sy lz4_compress
feature is enabled,
.Sy lz4 .
.Pp
The
.Sy lz4
compression algorithm is a high-performance replacement for the
.Sy lzjb
algorithm.
It features significantly faster compression and decompression, as well as a
moderately higher compression ratio than
.Sy lzjb ,
but can only be used on pools with the
.Sy lz4_compress
feature set to
.Sy enabled .
See
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy lz4_compress
feature.
.Pp
The
.Sy lzjb
compression algorithm is optimized for performance while providing decent data
compression.
.Pp
The
.Sy gzip
compression algorithm uses the same compression as the
.Xr gzip 1
command.
You can specify the
.Sy gzip
level by using the value
.Sy gzip- Ns Ar N ,
where
.Ar N
is an integer from 1
.Pq fastest
to 9
.Pq best compression ratio .
Currently,
.Sy gzip
is equivalent to
.Sy gzip-6
.Po which is also the default for
.Xr gzip 1
.Pc .
.Pp
The
.Sy zstd
compression algorithm provides both high compression ratios and good performance.
You can specify the
.Sy zstd
level by using the value
.Sy zstd- Ns Ar N ,
where
.Ar N
is an integer from 1
.Pq fastest
to 19
.Pq best compression ratio .
.Sy zstd
is equivalent to
.Sy zstd-3 .
.Pp
Faster speeds at the cost of the compression ratio can be requested by
setting a negative
.Sy zstd
level.
This is done using
.Sy zstd-fast- Ns Ar N ,
where
.Ar N
is an integer in [1-9,10,20,30,...,100,500,1000] which maps to a negative
.Sy zstd
level.
The lower the level, the faster the compression;
.Ar 1000 No provides the fastest compression and lowest compression ratio.
.Sy zstd-fast
is equivalent to
.Sy zstd-fast-1 .
.Pp
The
.Sy zle
compression algorithm compresses runs of zeros.
.Pp
This property can also be referred to by its shortened column name
.Sy compress .
Changing this property affects only newly-written data.
.Pp
When any setting except
.Sy off
is selected, compression will explicitly check for blocks consisting of only
zeroes (the NUL byte).
When a zero-filled block is detected, it is stored as
a hole and not compressed using the indicated compression algorithm.
.Pp
Any block being compressed must be no larger than 7/8 of its original size
after compression, otherwise the compression will not be considered worthwhile
and the block saved uncompressed.
Note that when the logical block is less than
8 times the disk sector size this effectively reduces the necessary compression
ratio; for example, 8kB blocks on disks with 4kB disk sectors must compress to 1/2
or less of their original size.
.It Xo
.Sy context Ns = Ns Sy none Ns | Ns
-.Ar SELinux-User : Ns Ar SElinux-Role : Ns Ar Selinux-Type : Ns Ar Sensitivity-Level
+.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux context for all files in the file system under
a mount point for that file system.
See
.Xr selinux 8
for more information.
.It Xo
.Sy fscontext Ns = Ns Sy none Ns | Ns
-.Ar SELinux-User : Ns Ar SElinux-Role : Ns Ar Selinux-Type : Ns Ar Sensitivity-Level
+.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux context for the file system being mounted.
See
.Xr selinux 8
for more information.
.It Xo
.Sy defcontext Ns = Ns Sy none Ns | Ns
-.Ar SELinux-User : Ns Ar SElinux-Role : Ns Ar Selinux-Type : Ns Ar Sensitivity-Level
+.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux default context for unlabeled files.
See
.Xr selinux 8
for more information.
.It Xo
.Sy rootcontext Ns = Ns Sy none Ns | Ns
-.Ar SELinux-User : Ns Ar SElinux-Role : Ns Ar Selinux-Type : Ns Ar Sensitivity-Level
+.Ar SELinux-User : Ns Ar SELinux-Role : Ns Ar SELinux-Type : Ns Ar Sensitivity-Level
.Xc
This flag sets the SELinux context for the root inode of the file system.
See
.Xr selinux 8
for more information.
.It Sy copies Ns = Ns Sy 1 Ns | Ns Sy 2 Ns | Ns Sy 3
Controls the number of copies of data stored for this dataset.
These copies are in addition to any redundancy provided by the pool, for
example, mirroring or RAID-Z.
The copies are stored on different disks, if possible.
The space used by multiple copies is charged to the associated file and dataset,
changing the
.Sy used
property and counting against quotas and reservations.
.Pp
Changing this property only affects newly-written data.
Therefore, set this property at file system creation time by using the
.Fl o Sy copies Ns = Ns Ar N
option.
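.Pp
For example, with
.Ar pool/data
as a placeholder dataset name:
.Dl # zfs create -o copies=2 pool/data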
.Pp
Remember that ZFS will not import a pool with a missing top-level vdev.
Do
.Em NOT
create, for example, a two-disk striped pool and set
.Sy copies Ns = Ns Ar 2
on some datasets, thinking you have set up redundancy for them.
When a disk fails, you will not be able to import the pool
and will have lost all of your data.
.Pp
Encrypted datasets may not have
.Sy copies Ns = Ns Ar 3
since the implementation stores some encryption metadata where the third copy
would normally be.
.It Sy devices Ns = Ns Sy on Ns | Ns Sy off
Controls whether device nodes can be opened on this file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy dev
and
.Sy nodev
mount options.
.It Xo
.Sy dedup Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy verify Ns | Ns
.Sy sha256 Ns Oo , Ns Sy verify Oc Ns | Ns Sy sha512 Ns Oo , Ns Sy verify Oc Ns | Ns Sy skein Ns Oo , Ns Sy verify Oc Ns | Ns
.Sy edonr , Ns Sy verify
.Xc
Configures deduplication for a dataset.
The default value is
.Sy off .
The default deduplication checksum is
.Sy sha256
(this may change in the future).
When
.Sy dedup
is enabled, the checksum defined here overrides the
.Sy checksum
property.
Setting the value to
.Sy verify
has the same effect as the setting
.Sy sha256 , Ns Sy verify .
.Pp
If set to
.Sy verify ,
ZFS will do a byte-by-byte comparison when two blocks have the same
signature, to make sure the block contents are identical.
Specifying
.Sy verify
is mandatory for the
.Sy edonr
algorithm.
.Pp
Unless necessary, deduplication should
.Em not
be enabled on a system.
See the
.Sx Deduplication
section of
.Xr zfsconcepts 7 .
.It Xo
.Sy dnodesize Ns = Ns Sy legacy Ns | Ns Sy auto Ns | Ns Sy 1k Ns | Ns
.Sy 2k Ns | Ns Sy 4k Ns | Ns Sy 8k Ns | Ns Sy 16k
.Xc
Specifies a compatibility mode or literal value for the size of dnodes in the
file system.
The default value is
.Sy legacy .
Setting this property to a value other than
.Sy legacy No requires the Sy large_dnode No pool feature to be enabled.
.Pp
Consider setting
.Sy dnodesize
to
.Sy auto
if the dataset uses the
.Sy xattr Ns = Ns Sy sa
property setting and the workload makes heavy use of extended attributes.
This
may be applicable to SELinux-enabled systems, Lustre servers, and Samba
servers, for example.
Literal values are supported for cases where the optimal
size is known in advance and for performance testing.
.Pp
Leave
.Sy dnodesize
set to
.Sy legacy
if you need to receive a send stream of this dataset on a pool that doesn't
enable the
.Sy large_dnode
feature, or if you need to import this pool on a system that doesn't support the
.Sy large_dnode No feature.
.Pp
This property can also be referred to by its shortened column name,
.Sy dnsize .
.It Xo
.Sy encryption Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy aes-128-ccm Ns | Ns
.Sy aes-192-ccm Ns | Ns Sy aes-256-ccm Ns | Ns Sy aes-128-gcm Ns | Ns
.Sy aes-192-gcm Ns | Ns Sy aes-256-gcm
.Xc
Controls the encryption cipher suite (block cipher, key length, and mode) used
for this dataset.
Requires the
.Sy encryption
feature to be enabled on the pool.
Requires a
.Sy keyformat
to be set at dataset creation time.
.Pp
Selecting
.Sy encryption Ns = Ns Sy on
when creating a dataset indicates that the default encryption suite will be
selected, which is currently
.Sy aes-256-gcm .
In order to provide consistent data protection, encryption must be specified at
dataset creation time and it cannot be changed afterwards.
.Pp
For more details and caveats about encryption see the
.Sx Encryption
section of
.Xr zfs-load-key 8 .
.It Sy keyformat Ns = Ns Sy raw Ns | Ns Sy hex Ns | Ns Sy passphrase
Controls what format the user's encryption key will be provided as.
This property is only set when the dataset is encrypted.
.Pp
Raw keys and hex keys must be 32 bytes long (regardless of the chosen
encryption suite) and must be randomly generated.
A raw key can be generated with the following command:
.Dl # Nm dd Sy if=/dev/urandom bs=32 count=1 Sy of= Ns Pa /path/to/output/key
.Pp
Passphrases must be between 8 and 512 bytes long and will be processed through
PBKDF2 before being used (see the
.Sy pbkdf2iters
property).
Even though the encryption suite cannot be changed after dataset creation,
the keyformat can be with
.Nm zfs Cm change-key .
.It Xo
-.Sy keylocation Ns = Ns Sy prompt Ns | Ns Sy file:// Ns Em </absolute/file/path>
+.Sy keylocation Ns = Ns Sy prompt Ns | Ns Sy file:// Ns Em </absolute/file/path> Ns | Ns Sy https:// Ns Em <address> | Ns Sy http:// Ns Em <address>
.Xc
Controls where the user's encryption key will be loaded from by default for
commands such as
.Nm zfs Cm load-key
and
.Nm zfs Cm mount Fl l .
This property is only set for encrypted datasets which are encryption roots.
If unspecified, the default is
.Sy prompt .
.Pp
Even though the encryption suite cannot be changed after dataset creation, the
keylocation can be with either
.Nm zfs Cm set
or
.Nm zfs Cm change-key .
If
.Sy prompt
is selected ZFS will ask for the key at the command prompt when it is required
to access the encrypted data (see
.Nm zfs Cm load-key
for details).
This setting will also allow the key to be passed in via the standard input stream,
but users should be careful not to place keys which should be kept secret on
the command line.
If a file URI is selected, the key will be loaded from the
specified absolute file path.
+If an HTTPS or HTTP URL is selected, it will be GETted using
+.Xr fetch 3 ,
+libcurl, or nothing, depending on compile-time configuration and run-time
+availability.
+The
+.Ev SSL_CA_CERT_FILE
+environment variable can be set to specify the location
+of the concatenated certificate store.
+The
+.Ev SSL_CA_CERT_PATH
+environment variable can be set to override the location
+of the directory containing the certificate authority bundle.
+The
+.Ev SSL_CLIENT_CERT_FILE
+and
+.Ev SSL_CLIENT_KEY_FILE
+environment variables can be set to configure the path
+to the client certificate and its key.
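.Pp
For example, with placeholder dataset, key path, and URL, the key location of
an existing encryption root can be changed with either
.Nm zfs Cm set
or
.Nm zfs Cm change-key :
.Dl # zfs set keylocation=file:///etc/zfs/keys/tank.key tank/enc
.Dl # zfs change-key -o keylocation=https://keys.example.com/tank.key tank/enc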
.It Sy pbkdf2iters Ns = Ns Ar iterations
Controls the number of PBKDF2 iterations that a
.Sy passphrase
encryption key should be run through when processing it into an encryption key.
This property is only defined when encryption is enabled and a keyformat of
.Sy passphrase
is selected.
The goal of PBKDF2 is to significantly increase the
computational difficulty needed to brute force a user's passphrase.
This is accomplished by forcing the attacker to run each passphrase through a
computationally expensive hashing function many times before they arrive at the
resulting key.
A user who actually knows the passphrase will only have to pay this cost once.
As CPUs become better at processing, this number should be
raised to ensure that a brute force attack is still not possible.
The current default is
.Sy 350000
and the minimum is
.Sy 100000 .
This property may be changed with
.Nm zfs Cm change-key .
.It Sy exec Ns = Ns Sy on Ns | Ns Sy off
Controls whether processes can be executed from within this file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy exec
and
.Sy noexec
mount options.
.It Sy filesystem_limit Ns = Ns Ar count Ns | Ns Sy none
Limits the number of filesystems and volumes that can exist under this point in
the dataset tree.
The limit is not enforced if the user is allowed to change the limit.
Setting a
.Sy filesystem_limit
on
a descendent of a filesystem that already has a
.Sy filesystem_limit
does not override the ancestor's
.Sy filesystem_limit ,
but rather imposes an additional limit.
This feature must be enabled to be used
.Po see
.Xr zpool-features 7
.Pc .
.It Sy special_small_blocks Ns = Ns Ar size
This value represents the threshold block size for including small file
blocks into the special allocation class.
Blocks smaller than or equal to this
value will be assigned to the special allocation class, while larger blocks
will be assigned to the regular class.
Valid values are zero or a power of two from 512B up to 1M.
The default size is 0, which means no small file blocks
will be allocated in the special class.
.Pp
Before setting this property, a special class vdev must be added to the
pool.
See
.Xr zpoolconcepts 7
for more details on the special allocation class.
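.Pp
For example, once a special vdev exists in the pool, with a placeholder
threshold of 32K and dataset
.Ar pool/fs :
.Dl # zfs set special_small_blocks=32K pool/fs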
.It Sy mountpoint Ns = Ns Pa path Ns | Ns Sy none Ns | Ns Sy legacy
Controls the mount point used for this file system.
See the
.Sx Mount Points
section of
.Xr zfsconcepts 7
for more information on how this property is used.
.Pp
When the
.Sy mountpoint
property is changed for a file system, the file system and any children that
inherit the mount point are unmounted.
If the new value is
.Sy legacy ,
then they remain unmounted.
Otherwise, they are automatically remounted in the new location if the property
was previously
.Sy legacy
or
.Sy none ,
or if they were mounted before the property was changed.
In addition, any shared file systems are unshared and shared in the new
location.
.It Sy nbmand Ns = Ns Sy on Ns | Ns Sy off
Controls whether the file system should be mounted with
.Sy nbmand
.Pq Non-blocking mandatory locks .
This is used for SMB clients.
Changes to this property only take effect when the file system is umounted and
remounted.
Support for these locks is scarce and not described by POSIX.
.It Sy overlay Ns = Ns Sy on Ns | Ns Sy off
Allow mounting on a busy directory or a directory which already contains
files or directories.
This is the default mount behavior for Linux and
.Fx
file systems.
On these platforms the property is
.Sy on
by default.
Set to
.Sy off
to disable overlay mounts for consistency with OpenZFS on other platforms.
.It Sy primarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
Controls what is cached in the primary cache
.Pq ARC .
If this property is set to
.Sy all ,
then both user data and metadata is cached.
If this property is set to
.Sy none ,
then neither user data nor metadata is cached.
If this property is set to
.Sy metadata ,
then only metadata is cached.
The default value is
.Sy all .
.It Sy quota Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space a dataset and its descendents can consume.
This property enforces a hard limit on the amount of space used.
This includes all space consumed by descendents, including file systems and
snapshots.
Setting a quota on a descendent of a dataset that already has a quota does not
override the ancestor's quota, but rather imposes an additional limit.
.Pp
Quotas cannot be set on volumes, as the
.Sy volsize
property acts as an implicit quota.
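.Pp
For example, with placeholder names and a placeholder size, a hard limit can
be placed on a dataset and all of its descendents:
.Dl # zfs set quota=50G pool/home/joe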
.It Sy snapshot_limit Ns = Ns Ar count Ns | Ns Sy none
Limits the number of snapshots that can be created on a dataset and its
descendents.
Setting a
.Sy snapshot_limit
on a descendent of a dataset that already has a
.Sy snapshot_limit
does not override the ancestor's
.Sy snapshot_limit ,
but rather imposes an additional limit.
The limit is not enforced if the user is allowed to change the limit.
For example, this means that recursive snapshots taken from the global zone are
counted against each delegated dataset within a zone.
This feature must be enabled to be used
.Po see
.Xr zpool-features 7
.Pc .
.It Sy userquota@ Ns Ar user Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified user.
User space consumption is identified by the
.Sy userspace@ Ns Ar user
property.
.Pp
Enforcement of user quotas may be delayed by several seconds.
This delay means that a user might exceed their quota before the system notices
that they are over quota and begins to refuse additional writes with the
.Er EDQUOT
error message.
See the
.Nm zfs Cm userspace
command for more information.
.Pp
Unprivileged users can only access their own space usage.
The root user, or a user who has been granted the
.Sy userquota
privilege with
.Nm zfs Cm allow ,
can get and set everyone's quota.
.Pp
This property is not available on volumes, on file systems before version 4, or
on pools before version 15.
The
.Sy userquota@ Ns Ar ...
properties are not displayed by
.Nm zfs Cm get Sy all .
The user's name must be appended after the
.Sy @
symbol, using one of the following forms:
.Bl -bullet -compact -offset 4n
.It
POSIX name
.Pq Qq joe
.It
POSIX numeric ID
.Pq Qq 789
.It
SID name
.Pq Qq joe.smith@mydomain
.It
SID numeric ID
.Pq Qq S-1-123-456-789
.El
.Pp
Files created on Linux always have POSIX owners.
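.Pp
For example, with placeholder user and dataset names:
.Dl # zfs set userquota@joe=25G pool/home
.Dl # zfs get userquota@joe pool/home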
.It Sy userobjquota@ Ns Ar user Ns = Ns Ar size Ns | Ns Sy none
The
.Sy userobjquota
is similar to
.Sy userquota
but it limits the number of objects a user can create.
Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy groupquota@ Ns Ar group Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified group.
Group space consumption is identified by the
.Sy groupused@ Ns Ar group
property.
.Pp
Unprivileged users can access only their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupquota
privilege with
.Nm zfs Cm allow ,
can get and set all groups' quotas.
.It Sy groupobjquota@ Ns Ar group Ns = Ns Ar size Ns | Ns Sy none
The
.Sy groupobjquota
is similar to
.Sy groupquota
but it limits the number of objects a group can consume.
Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy projectquota@ Ns Ar project Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified project.
Project space consumption is identified by the
.Sy projectused@ Ns Ar project
property.
Please refer to
.Sy projectused
for more information about how the project is identified and set or changed.
.Pp
The root user, or a user who has been granted the
.Sy projectquota
privilege with
.Nm zfs allow ,
can access all projects' quota.
.It Sy projectobjquota@ Ns Ar project Ns = Ns Ar size Ns | Ns Sy none
The
.Sy projectobjquota
is similar to
.Sy projectquota
but it limits the number of objects a project can consume.
Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
Controls whether this dataset can be modified.
The default value is
.Sy off .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy ro
and
.Sy rw
mount options.
.Pp
This property can also be referred to by its shortened column name,
.Sy rdonly .
.It Sy recordsize Ns = Ns Ar size
Specifies a suggested block size for files in the file system.
This property is designed solely for use with database workloads that access
files in fixed-size records.
ZFS automatically tunes block sizes according to internal algorithms optimized
for typical access patterns.
.Pp
For databases that create very large files but access them in small random
chunks, these algorithms may be suboptimal.
Specifying a
.Sy recordsize
greater than or equal to the record size of the database can result in
significant performance gains.
Use of this property for general purpose file systems is strongly discouraged,
and may adversely affect performance.
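.Pp
For example, for a database that performs fixed 16K I/O, with the record size
and dataset name purely illustrative:
.Dl # zfs set recordsize=16K pool/db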
.Pp
The size specified must be a power of two greater than or equal to
.Ar 512B
and less than or equal to
.Ar 128kB .
If the
.Sy large_blocks
feature is enabled on the pool, the size may be up to
.Ar 1MB .
See
.Xr zpool-features 7
for details on ZFS feature flags.
.Pp
Changing the file system's
.Sy recordsize
affects only files created afterward; existing files are unaffected.
.Pp
This property can also be referred to by its shortened column name,
.Sy recsize .
.It Sy redundant_metadata Ns = Ns Sy all Ns | Ns Sy most
Controls what types of metadata are stored redundantly.
ZFS stores an extra copy of metadata, so that if a single block is corrupted,
the amount of user data lost is limited.
This extra copy is in addition to any redundancy provided at the pool level
.Pq e.g. by mirroring or RAID-Z ,
and is in addition to an extra copy specified by the
.Sy copies
property
.Pq up to a total of 3 copies .
For example if the pool is mirrored,
.Sy copies Ns = Ns 2 ,
and
.Sy redundant_metadata Ns = Ns Sy most ,
then ZFS stores 6 copies of most metadata, and 4 copies of data and some
metadata.
.Pp
When set to
.Sy all ,
ZFS stores an extra copy of all metadata.
If a single on-disk block is corrupt, at worst a single block of user data
.Po which is
.Sy recordsize
bytes long
.Pc
can be lost.
.Pp
When set to
.Sy most ,
ZFS stores an extra copy of most types of metadata.
This can improve performance of random writes, because less metadata must be
written.
In practice, at worst about 100 blocks
.Po of
.Sy recordsize
bytes each
.Pc
of user data can be lost if a single on-disk block is corrupt.
The exact behavior of which metadata blocks are stored redundantly may change in
future releases.
.Pp
The default value is
.Sy all .
.It Sy refquota Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space a dataset can consume.
This property enforces a hard limit on the amount of space used.
This hard limit does not include space used by descendents, including file
systems and snapshots.
.It Sy refreservation Ns = Ns Ar size Ns | Ns Sy none Ns | Ns Sy auto
The minimum amount of space guaranteed to a dataset, not including its
descendents.
When the amount of space used is below this value, the dataset is treated as if
it were taking up the amount of space specified by
.Sy refreservation .
The
.Sy refreservation
reservation is accounted for in the parent datasets' space used, and counts
against the parent datasets' quotas and reservations.
.Pp
If
.Sy refreservation
is set, a snapshot is only allowed if there is enough free pool space outside of
this reservation to accommodate the current number of
.Qq referenced
bytes in the dataset.
.Pp
If
.Sy refreservation
is set to
.Sy auto ,
a volume is thick provisioned
.Po or
.Qq not sparse
.Pc .
.Sy refreservation Ns = Ns Sy auto
is only supported on volumes.
See
.Sy volsize
in the
.Sx Native Properties
section for more information about sparse volumes.
.Pp
This property can also be referred to by its shortened column name,
.Sy refreserv .
.It Sy relatime Ns = Ns Sy on Ns | Ns Sy off
Controls the manner in which the access time is updated when
.Sy atime Ns = Ns Sy on
is set.
Turning this property on causes the access time to be updated relative
to the modify or change time.
Access time is only updated if the previous
access time was earlier than the current modify or change time or if the
existing access time hasn't been updated within the past 24 hours.
The default value is
.Sy off .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy relatime
and
.Sy norelatime
mount options.
.It Sy reservation Ns = Ns Ar size Ns | Ns Sy none
The minimum amount of space guaranteed to a dataset and its descendants.
When the amount of space used is below this value, the dataset is treated as if
it were taking up the amount of space specified by its reservation.
Reservations are accounted for in the parent datasets' space used, and count
against the parent datasets' quotas and reservations.
.Pp
This property can also be referred to by its shortened column name,
.Sy reserv .
.It Sy secondarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
Controls what is cached in the secondary cache
.Pq L2ARC .
If this property is set to
.Sy all ,
then both user data and metadata is cached.
If this property is set to
.Sy none ,
then neither user data nor metadata is cached.
If this property is set to
.Sy metadata ,
then only metadata is cached.
The default value is
.Sy all .
.It Sy setuid Ns = Ns Sy on Ns | Ns Sy off
Controls whether the setuid bit is respected for the file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy suid
and
.Sy nosuid
mount options.
.It Sy sharesmb Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Ar opts
Controls whether the file system is shared by using
.Sy Samba USERSHARES
and what options are to be used.
Otherwise, the file system is automatically shared and unshared with the
.Nm zfs Cm share
and
.Nm zfs Cm unshare
commands.
If the property is set to on, the
.Xr net 8
command is invoked to create a
.Sy USERSHARE .
.Pp
Because SMB shares require a resource name, a unique resource name is
constructed from the dataset name.
The constructed name is a copy of the
dataset name, except that any characters in the dataset name that would be
invalid in the resource name are replaced with underscore (_) characters.
Linux does not currently support additional options which might be available
on Solaris.
.Pp
If the
.Sy sharesmb
property is set to
.Sy off ,
the file systems are unshared.
.Pp
By default, the share is created with the ACL (Access Control List)
"Everyone:F" ("F" stands for "full permissions", i.e. read and write
permissions) and no guest access, which means Samba must be able to
authenticate a real user (via system passwd/shadow, LDAP, or smbpasswd).
This means that any additional access control
(such as disallowing access for specific users) must be done on the underlying
file system.
.It Sy sharenfs Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Ar opts
Controls whether the file system is shared via NFS, and what options are to be
used.
A file system with a
.Sy sharenfs
property of
.Sy off
is managed with the
.Xr exportfs 8
command and entries in the
.Pa /etc/exports
file.
Otherwise, the file system is automatically shared and unshared with the
.Nm zfs Cm share
and
.Nm zfs Cm unshare
commands.
If the property is set to
.Sy on ,
the dataset is shared using the default options:
.Dl sec=sys,rw,crossmnt,no_subtree_check
.Pp
+Please note that the options are comma-separated, unlike those found in
+.Xr exports 5 .
+This is done to negate the need for quoting, as well as to make parsing
+with scripts easier.
+.Pp
See
.Xr exports 5
for the meaning of the default options.
Otherwise, the
.Xr exportfs 8
command is invoked with options equivalent to the contents of this property.
.Pp
When the
.Sy sharenfs
property is changed for a dataset, the dataset and any children inheriting the
property are re-shared with the new options, only if the property was previously
.Sy off ,
or if they were shared before the property was changed.
If the new property is
.Sy off ,
the file systems are unshared.
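.Pp
For example, with a placeholder dataset name and network, a file system can be
exported read/write to a single subnet:
.Dl # zfs set sharenfs=rw=@192.168.1.0/24 pool/export/home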
.It Sy logbias Ns = Ns Sy latency Ns | Ns Sy throughput
Provide a hint to ZFS about handling of synchronous requests in this dataset.
If
.Sy logbias
is set to
.Sy latency
.Pq the default ,
ZFS will use pool log devices
.Pq if configured
to handle the requests at low latency.
If
.Sy logbias
is set to
.Sy throughput ,
ZFS will not use configured pool log devices.
ZFS will instead optimize synchronous operations for global pool throughput and
efficient use of resources.
.It Sy snapdev Ns = Ns Sy hidden Ns | Ns Sy visible
Controls whether the volume snapshot devices under
.Pa /dev/zvol/ Ns Aq Ar pool
are hidden or visible.
The default value is
.Sy hidden .
.It Sy snapdir Ns = Ns Sy hidden Ns | Ns Sy visible
Controls whether the
.Pa .zfs
directory is hidden or visible in the root of the file system as discussed in
the
.Sx Snapshots
section of
.Xr zfsconcepts 7 .
The default value is
.Sy hidden .
.It Sy sync Ns = Ns Sy standard Ns | Ns Sy always Ns | Ns Sy disabled
Controls the behavior of synchronous requests
.Pq e.g. fsync, O_DSYNC .
.Sy standard
is the POSIX-specified behavior of ensuring all synchronous requests
are written to stable storage and all devices are flushed to ensure
data is not cached by device controllers
.Pq this is the default .
.Sy always
causes every file system transaction to be written and flushed before its
system call returns.
This has a large performance penalty.
.Sy disabled
disables synchronous requests.
File system transactions are only committed to stable storage periodically.
This option will give the highest performance.
However, it is very dangerous as ZFS would be ignoring the synchronous
transaction demands of applications such as databases or NFS.
Administrators should only use this option when the risks are understood.
.It Sy version Ns = Ns Ar N Ns | Ns Sy current
The on-disk version of this file system, which is independent of the pool
version.
This property can only be set to later supported versions.
See the
.Nm zfs Cm upgrade
command.
.It Sy volsize Ns = Ns Ar size
For volumes, specifies the logical size of the volume.
By default, creating a volume establishes a reservation of equal size.
For storage pools with a version number of 9 or higher, a
.Sy refreservation
is set instead.
Any changes to
.Sy volsize
are reflected in an equivalent change to the reservation
.Pq or Sy refreservation .
The
.Sy volsize
can only be set to a multiple of
.Sy volblocksize ,
and cannot be zero.
.Pp
The reservation is kept equal to the volume's logical size to prevent unexpected
behavior for consumers.
Without the reservation, the volume could run out of space, resulting in
undefined behavior or data corruption, depending on how the volume is used.
These effects can also occur when the volume size is changed while it is in use
.Pq particularly when shrinking the size .
Extreme care should be used when adjusting the volume size.
.Pp
Though not recommended, a
.Qq sparse volume
.Po also known as
.Qq thin provisioned
.Pc
can be created by specifying the
.Fl s
option to the
.Nm zfs Cm create Fl V
command, or by changing the value of the
.Sy refreservation
property
.Po or
.Sy reservation
property on pool version 8 or earlier
.Pc
after the volume has been created.
A
.Qq sparse volume
is a volume where the value of
.Sy refreservation
is less than the size of the volume plus the space required to store its
metadata.
Consequently, writes to a sparse volume can fail with
.Er ENOSPC
when the pool is low on space.
For a sparse volume, changes to
.Sy volsize
are not reflected in the
.Sy refreservation .
A volume that is not sparse is said to be
.Qq thick provisioned .
A sparse volume can become thick provisioned by setting
.Sy refreservation
to
.Sy auto .
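.Pp
As a sketch (the pool and volume names are examples), a sparse volume could be
created and later converted to thick provisioning with:
.Dl # Nm zfs Cm create Fl s V Ar 100G tank/sparsevol
.Dl # Nm zfs Cm set Sy refreservation Ns = Ns Sy auto Ar tank/sparsevol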
.It Sy volmode Ns = Ns Sy default Ns | Ns Sy full Ns | Ns Sy geom Ns | Ns Sy dev Ns | Ns Sy none
This property specifies how volumes should be exposed to the OS.
Setting it to
.Sy full
exposes volumes as fully fledged block devices, providing maximal
functionality.
The value
.Sy geom
is just an alias for
.Sy full
and is kept for compatibility.
Setting it to
.Sy dev
hides its partitions.
Volumes with this property set to
.Sy none
are not exposed outside ZFS, but can still be snapshotted, cloned, replicated,
etc., which can be suitable for backup purposes.
The value
.Sy default
means that volume exposure is controlled by the system-wide tunable
.Sy zvol_volmode ,
where
.Sy full ,
.Sy dev
and
.Sy none
are encoded as 1, 2 and 3 respectively.
The default value is
.Sy full .
.It Sy vscan Ns = Ns Sy on Ns | Ns Sy off
Controls whether regular files should be scanned for viruses when a file is
opened and closed.
In addition to enabling this property, the virus scan service must also be
enabled for virus scanning to occur.
The default value is
.Sy off .
This property is not used on Linux.
.It Sy xattr Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy sa
Controls whether extended attributes are enabled for this file system.
Two styles of extended attributes are supported: either directory based
or system attribute based.
.Pp
The default value of
.Sy on
enables directory based extended attributes.
This style of extended attribute imposes no practical limit
on either the size or number of attributes which can be set on a file,
although under Linux the
.Xr getxattr 2
and
.Xr setxattr 2
system calls limit the maximum size to 64K.
This is the most compatible
style of extended attribute and is supported by all ZFS implementations.
.Pp
System attribute based xattrs can be enabled by setting the value to
.Sy sa .
The key advantage of this type of xattr is improved performance.
Storing extended attributes as system attributes
significantly decreases the amount of disk IO required.
Up to 64K of data may be stored per-file in the space reserved for system attributes.
If there is not enough space available for an extended attribute
then it will be automatically written as a directory based xattr.
System attribute based extended attributes are not accessible
on platforms which do not support the
.Sy xattr Ns = Ns Sy sa
feature.
.Pp
The use of system attribute based xattrs is strongly encouraged for users of
SELinux or POSIX ACLs.
Both of these features heavily rely on extended
attributes and benefit significantly from the reduced access time.
.Pp
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy xattr
and
.Sy noxattr
mount options.
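.Pp
For example (the dataset name is hypothetical), system attribute based xattrs
could be enabled on a dataset holding SELinux-labelled files with:
.Dl # Nm zfs Cm set Sy xattr Ns = Ns Sy sa Ar tank/containers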
.It Sy jailed Ns = Ns Sy off Ns | Ns Sy on
Controls whether the dataset is managed from a jail.
See
.Xr zfs-jail 8
for more information.
Jails are a
.Fx
feature and are not relevant on other platforms.
The default value is
.Sy off .
.It Sy zoned Ns = Ns Sy on Ns | Ns Sy off
Controls whether the dataset is managed from a non-global zone.
Zones are a Solaris feature and are not relevant on other platforms.
The default value is
.Sy off .
.El
.Pp
The following three properties cannot be changed after the file system is
created, and therefore, should be set when the file system is created.
If the properties are not set with the
.Nm zfs Cm create
or
.Nm zpool Cm create
commands, these properties are inherited from the parent dataset.
If the parent dataset lacks these properties due to having been created prior to
these features being supported, the new file system will have the default values
for these properties.
.Bl -tag -width ""
.It Xo
.Sy casesensitivity Ns = Ns Sy sensitive Ns | Ns
.Sy insensitive Ns | Ns Sy mixed
.Xc
Indicates whether the file name matching algorithm used by the file system
should be case-sensitive, case-insensitive, or allow a combination of both
styles of matching.
The default value for the
.Sy casesensitivity
property is
.Sy sensitive .
Traditionally,
.Ux
and POSIX file systems have case-sensitive file names.
.Pp
The
.Sy mixed
value for the
.Sy casesensitivity
property indicates that the file system can support requests for both
case-sensitive and case-insensitive matching behavior.
Currently, case-insensitive matching behavior on a file system that supports
mixed behavior is limited to the SMB server product.
For more information about the
.Sy mixed
value behavior, see the "ZFS Administration Guide".
.It Xo
.Sy normalization Ns = Ns Sy none Ns | Ns Sy formC Ns | Ns
.Sy formD Ns | Ns Sy formKC Ns | Ns Sy formKD
.Xc
Indicates whether the file system should perform a
.Sy unicode
normalization of file names whenever two file names are compared, and which
normalization algorithm should be used.
File names are always stored unmodified; names are normalized as part of any
comparison process.
If this property is set to a legal value other than
.Sy none ,
and the
.Sy utf8only
property was left unspecified, the
.Sy utf8only
property is automatically set to
.Sy on .
The default value of the
.Sy normalization
property is
.Sy none .
This property cannot be changed after the file system is created.
.It Sy utf8only Ns = Ns Sy on Ns | Ns Sy off
Indicates whether the file system should reject file names that include
characters that are not present in the
.Sy UTF-8
character code set.
If this property is explicitly set to
.Sy off ,
the normalization property must either not be explicitly set or be set to
.Sy none .
The default value for the
.Sy utf8only
property is
.Sy off .
This property cannot be changed after the file system is created.
.El
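.Pp
As an illustrative sketch (the dataset name is an example), all three
creation-time properties could be set for a dataset intended for SMB sharing:
.Dl # Nm zfs Cm create Fl o Sy casesensitivity Ns = Ns Sy mixed Fl o Sy normalization Ns = Ns Sy formD Fl o Sy utf8only Ns = Ns Sy on Ar tank/smbshare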
.Pp
The
.Sy casesensitivity ,
.Sy normalization ,
and
.Sy utf8only
properties are also new permissions that can be assigned to non-privileged users
by using the ZFS delegated administration feature.
.
.Ss Temporary Mount Point Properties
When a file system is mounted, either through
.Xr mount 8
for legacy mounts or the
.Nm zfs Cm mount
command for normal file systems, its mount options are set according to its
properties.
The correlation between properties and mount options is as follows:
.Bl -tag -compact -offset Ds -width "rootcontext="
.It Sy atime
atime/noatime
.It Sy canmount
auto/noauto
.It Sy devices
dev/nodev
.It Sy exec
exec/noexec
.It Sy readonly
ro/rw
.It Sy relatime
relatime/norelatime
.It Sy setuid
suid/nosuid
.It Sy xattr
xattr/noxattr
.It Sy nbmand
mand/nomand
.It Sy context Ns =
context=
.It Sy fscontext Ns =
fscontext=
.It Sy defcontext Ns =
defcontext=
.It Sy rootcontext Ns =
rootcontext=
.El
.Pp
In addition, these options can be set on a per-mount basis using the
.Fl o
option, without affecting the property that is stored on disk.
The values specified on the command line override the values stored in the
dataset.
The
.Sy nosuid
option is an alias for
.Sy nodevices , Ns Sy nosetuid .
These properties are reported as
.Qq temporary
by the
.Nm zfs Cm get
command.
If the properties are changed while the dataset is mounted, the new setting
overrides any temporary settings.
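.Pp
For example (the dataset name is hypothetical), a file system could be mounted
read-only without atime updates for a single mount, leaving the stored
properties untouched:
.Dl # Nm zfs Cm mount Fl o Ar ro,noatime Ar tank/scratch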
.
.Ss User Properties
In addition to the standard native properties, ZFS supports arbitrary user
properties.
User properties have no effect on ZFS behavior, but applications or
administrators can use them to annotate datasets
.Pq file systems, volumes, and snapshots .
.Pp
User property names must contain a colon
.Pq Qq Sy \&:
character to distinguish them from native properties.
They may contain lowercase letters, numbers, and the following punctuation
characters: colon
.Pq Qq Sy \&: ,
dash
.Pq Qq Sy - ,
period
.Pq Qq Sy \&. ,
and underscore
.Pq Qq Sy _ .
The expected convention is that the property name is divided into two portions
such as
.Ar module : Ns Ar property ,
but this namespace is not enforced by ZFS.
User property names can be at most 256 characters, and cannot begin with a dash
.Pq Qq Sy - .
.Pp
When making programmatic use of user properties, it is strongly suggested to use
a reversed DNS domain name for the
.Ar module
component of property names to reduce the chance that two
independently-developed packages use the same property name for different
purposes.
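.Pp
For example (the property name and dataset are hypothetical), a backup tool
distributed by example.com might tag datasets with:
.Dl # Nm zfs Cm set Ar com.example:backup-policy Ns = Ns Ar daily Ar tank/home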
.Pp
The values of user properties are arbitrary strings, are always inherited, and
are never validated.
All of the commands that operate on properties
.Po Nm zfs Cm list ,
.Nm zfs Cm get ,
.Nm zfs Cm set ,
and so forth
.Pc
can be used to manipulate both native properties and user properties.
Use the
.Nm zfs Cm inherit
command to clear a user property.
If the property is not defined in any parent dataset, it is removed entirely.
Property values are limited to 8192 bytes.
diff --git a/sys/contrib/openzfs/man/man7/zpoolprops.7 b/sys/contrib/openzfs/man/man7/zpoolprops.7
index 513f02e0314f..5bd1c2bf3128 100644
--- a/sys/contrib/openzfs/man/man7/zpoolprops.7
+++ b/sys/contrib/openzfs/man/man7/zpoolprops.7
@@ -1,412 +1,417 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
.Dd May 27, 2021
.Dt ZPOOLPROPS 7
.Os
.
.Sh NAME
.Nm zpoolprops
.Nd properties of ZFS storage pools
.
.Sh DESCRIPTION
Each pool has several properties associated with it.
Some properties are read-only statistics while others are configurable and
change the behavior of the pool.
.Pp
The following are read-only properties:
.Bl -tag -width "unsupported@guid"
.It Cm allocated
Amount of storage used within the pool.
See
.Sy fragmentation
and
.Sy free
for more information.
.It Sy capacity
Percentage of pool space used.
This property can also be referred to by its shortened column name,
.Sy cap .
.It Sy expandsize
Amount of uninitialized space within the pool or device that can be used to
increase the total capacity of the pool.
On whole-disk vdevs, this is the space beyond the end of the GPT –
typically occurring when a LUN is dynamically expanded
or a disk replaced with a larger one.
On partition vdevs, this is the space appended to the partition after it was
added to the pool – most likely by resizing it in-place.
The space can be claimed for the pool by bringing it online with
.Sy autoexpand=on
or using
.Nm zpool Cm online Fl e .
.It Sy fragmentation
The amount of fragmentation in the pool.
As the amount of space
.Sy allocated
increases, it becomes more difficult to locate
.Sy free
space.
This may result in lower write performance compared to pools with more
unfragmented free space.
.It Sy free
The amount of free space available in the pool.
By contrast, the
.Xr zfs 8
.Sy available
property describes how much new data can be written to ZFS filesystems/volumes.
The zpool
.Sy free
property is not generally useful for this purpose, and can be substantially more than the zfs
.Sy available
space.
This discrepancy is due to several factors, including raidz parity;
zfs reservation, quota, refreservation, and refquota properties; and space set aside by
.Sy spa_slop_shift
(see
.Xr zfs 4
for more information).
.It Sy freeing
After a file system or snapshot is destroyed, the space it was using is
returned to the pool asynchronously.
.Sy freeing
is the amount of space remaining to be reclaimed.
Over time
.Sy freeing
will decrease while
.Sy free
increases.
+.It Sy leaked
+Space not released while
+.Sy freeing
+due to corruption, now permanently leaked into the pool.
.It Sy health
The current health of the pool.
Health can be one of
.Sy ONLINE , DEGRADED , FAULTED , OFFLINE , REMOVED , UNAVAIL .
.It Sy guid
A unique identifier for the pool.
.It Sy load_guid
A unique identifier for the pool.
Unlike the
.Sy guid
property, this identifier is generated every time we load the pool (i.e. does
not persist across imports/exports) and never changes while the pool is loaded
(even if a
.Sy reguid
operation takes place).
.It Sy size
Total size of the storage pool.
.It Sy unsupported@ Ns Em guid
Information about unsupported features that are enabled on the pool.
See
.Xr zpool-features 7
for details.
.El
.Pp
The space usage properties report actual physical space available to the
storage pool.
The physical space can be different from the total amount of space that any
contained datasets can actually use.
The amount of space used in a raidz configuration depends on the characteristics
of the data being written.
In addition, ZFS reserves some space for internal accounting that the
.Xr zfs 8
command takes into account, but the
.Nm
command does not.
For non-full pools of a reasonable size, these effects should be invisible.
For small pools, or pools that are close to being completely full, these
discrepancies may become more noticeable.
.Pp
The following property can be set at creation time and import time:
.Bl -tag -width Ds
.It Sy altroot
Alternate root directory.
If set, this directory is prepended to any mount points within the pool.
This can be used when examining an unknown pool where the mount points cannot be
trusted, or in an alternate boot environment, where the typical paths are not
valid.
.Sy altroot
is not a persistent property.
It is valid only while the system is up.
Setting
.Sy altroot
defaults to using
.Sy cachefile Ns = Ns Sy none ,
though this may be overridden using an explicit setting.
.El
.Pp
The following property can be set only at import time:
.Bl -tag -width Ds
.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
If set to
.Sy on ,
the pool will be imported in read-only mode.
This property can also be referred to by its shortened column name,
.Sy rdonly .
.El
.Pp
The following properties can be set at creation time and import time, and later
changed with the
.Nm zpool Cm set
command:
.Bl -tag -width Ds
.It Sy ashift Ns = Ns Sy ashift
Pool sector size exponent, to the power of
.Sy 2
(internally referred to as
.Sy ashift ) .
Values from 9 to 16, inclusive, are valid; also, the
value 0 (the default) means to auto-detect using the kernel's block
layer and a ZFS internal exception list.
I/O operations will be aligned to the specified size boundaries.
Additionally, the minimum (disk)
write size will be set to the specified size, so this represents a
space vs. performance trade-off.
For optimal performance, the pool sector size should be greater than
or equal to the sector size of the underlying disks.
The typical case for setting this property is when
performance is important and the underlying disks use 4KiB sectors but
report 512B sectors to the OS (for compatibility reasons); in that
case, set
.Sy ashift Ns = Ns Sy 12
(which is
.Sy 1<<12 No = Sy 4096 ) .
When set, this property is
used as the default hint value in subsequent vdev operations (add,
attach and replace).
Changing this value will not modify any existing
vdev, not even on disk replacement; however, it can be used, for
instance, to replace a dying 512B-sector disk with a newer 4KiB-sector
device: this will probably result in bad performance but at the
same time could prevent loss of data.
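.Pp
For example (pool and device names are illustrative), a pool backed by disks
known to use 4KiB physical sectors could be created with:
.Dl # Nm zpool Cm create Fl o Sy ashift Ns = Ns Sy 12 Ar tank mirror sda sdb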
.It Sy autoexpand Ns = Ns Sy on Ns | Ns Sy off
Controls automatic pool expansion when the underlying LUN is grown.
If set to
.Sy on ,
the pool will be resized according to the size of the expanded device.
If the device is part of a mirror or raidz then all devices within that
mirror/raidz group must be expanded before the new space is made available to
the pool.
The default behavior is
.Sy off .
This property can also be referred to by its shortened column name,
.Sy expand .
.It Sy autoreplace Ns = Ns Sy on Ns | Ns Sy off
Controls automatic device replacement.
If set to
.Sy off ,
device replacement must be initiated by the administrator by using the
.Nm zpool Cm replace
command.
If set to
.Sy on ,
any new device, found in the same physical location as a device that previously
belonged to the pool, is automatically formatted and replaced.
The default behavior is
.Sy off .
This property can also be referred to by its shortened column name,
.Sy replace .
Autoreplace can also be used with virtual disks (like device
mapper) provided that you use the /dev/disk/by-vdev paths set up by
vdev_id.conf.
See the
.Xr vdev_id 8
manual page for more details.
Autoreplace and autoonline require the ZFS Event Daemon be configured and
running.
See the
.Xr zed 8
manual page for more details.
.It Sy autotrim Ns = Ns Sy on Ns | Ns Sy off
When set to
.Sy on
space which has been recently freed, and is no longer allocated by the pool,
will be periodically trimmed.
This allows block device vdevs which support
BLKDISCARD, such as SSDs, or file vdevs on which the underlying file system
supports hole-punching, to reclaim unused blocks.
The default value for this property is
.Sy off .
.Pp
Automatic TRIM does not immediately reclaim blocks after a free.
Instead, it will optimistically delay allowing smaller ranges to be aggregated
into a few larger ones.
These can then be issued more efficiently to the storage.
TRIM on L2ARC devices is enabled by setting
.Sy l2arc_trim_ahead > 0 .
.Pp
Be aware that automatic trimming of recently freed data blocks can put
significant stress on the underlying storage devices.
This will vary depending on how well the specific device handles these commands.
For lower-end devices it is often possible to achieve most of the benefits
of automatic trimming by running an on-demand (manual) TRIM periodically
using the
.Nm zpool Cm trim
command.
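.Pp
As an example (the pool name is hypothetical), either mode of trimming could
be used like this:
.Dl # Nm zpool Cm set Sy autotrim Ns = Ns Sy on Ar tank
.Dl # Nm zpool Cm trim Ar tank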
.It Sy bootfs Ns = Ns Sy (unset) Ns | Ns Ar pool Ns Op / Ns Ar dataset
Identifies the default bootable dataset for the root pool.
This property is expected to be set mainly by the installation and upgrade programs.
Not all Linux distribution boot processes use the bootfs property.
.It Sy cachefile Ns = Ns Ar path Ns | Ns Sy none
Controls the location of where the pool configuration is cached.
Discovering all pools on system startup requires a cached copy of the
configuration data that is stored on the root file system.
All pools in this cache are automatically imported when the system boots.
Some environments, such as install and clustering, need to cache this
information in a different location so that pools are not automatically
imported.
Setting this property caches the pool configuration in a different location that
can later be imported with
.Nm zpool Cm import Fl c .
Setting it to the value
.Sy none
creates a temporary pool that is never cached, and the
.Qq
.Pq empty string
uses the default location.
.Pp
Multiple pools can share the same cache file.
Because the kernel destroys and recreates this file when pools are added and
removed, care should be taken when attempting to access this file.
When the last pool using a
.Sy cachefile
is exported or destroyed, the file will be empty.
.It Sy comment Ns = Ns Ar text
A text string consisting of printable ASCII characters that will be stored
such that it is available even if the pool becomes faulted.
An administrator can provide additional information about a pool using this
property.
.It Sy compatibility Ns = Ns Sy off Ns | Ns Sy legacy Ns | Ns Ar file Ns Oo , Ns Ar file Oc Ns …
Specifies that the pool maintain compatibility with specific feature sets.
When set to
.Sy off
(or unset) compatibility is disabled (all features may be enabled); when set to
.Sy legacy Ns
no features may be enabled.
When set to a comma-separated list of filenames
(each filename may either be an absolute path, or relative to
.Pa /etc/zfs/compatibility.d
or
.Pa /usr/share/zfs/compatibility.d )
the lists of requested features are read from those files, separated by
whitespace and/or commas.
Only features present in all files may be enabled.
.Pp
See
.Xr zpool-features 7 ,
.Xr zpool-create 8
and
.Xr zpool-upgrade 8
for more information on the operation of compatibility feature sets.
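.Pp
For example (the pool name is hypothetical, and the feature-set file name is
only one example of a file that may be shipped in the directories above), a
pool could be restricted to a given feature set with:
.Dl # Nm zpool Cm set Sy compatibility Ns = Ns Ar openzfs-2.0-linux Ar tank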
.It Sy dedupditto Ns = Ns Ar number
This property is deprecated and no longer has any effect.
.It Sy delegation Ns = Ns Sy on Ns | Ns Sy off
Controls whether a non-privileged user is granted access based on the dataset
permissions defined on the dataset.
See
.Xr zfs 8
for more information on ZFS delegated administration.
.It Sy failmode Ns = Ns Sy wait Ns | Ns Sy continue Ns | Ns Sy panic
Controls the system behavior in the event of catastrophic pool failure.
This condition is typically a result of a loss of connectivity to the underlying
storage device(s) or a failure of all devices within the pool.
The behavior of such an event is determined as follows:
.Bl -tag -width "continue"
.It Sy wait
Blocks all I/O access until the device connectivity is recovered and the errors
-are cleared.
+are cleared with
+.Nm zpool Cm clear .
This is the default behavior.
.It Sy continue
Returns
.Er EIO
to any new write I/O requests but allows reads to any of the remaining healthy
devices.
Any write requests that have yet to be committed to disk would be blocked.
.It Sy panic
Prints out a message to the console and generates a system crash dump.
.El
.It Sy feature@ Ns Ar feature_name Ns = Ns Sy enabled
The value of this property is the current state of
.Ar feature_name .
The only valid value when setting this property is
.Sy enabled
which moves
.Ar feature_name
to the enabled state.
See
.Xr zpool-features 7
for details on feature states.
.It Sy listsnapshots Ns = Ns Sy on Ns | Ns Sy off
Controls whether information about snapshots associated with this pool is
output when
.Nm zfs Cm list
is run without the
.Fl t
option.
The default value is
.Sy off .
This property can also be referred to by its shortened name,
.Sy listsnaps .
.It Sy multihost Ns = Ns Sy on Ns | Ns Sy off
Controls whether a pool activity check should be performed during
.Nm zpool Cm import .
When a pool is determined to be active, it cannot be imported, even with the
.Fl f
option.
This property is intended to be used in failover configurations
where multiple hosts have access to a pool on shared storage.
.Pp
Multihost provides protection on import only.
It does not protect against an
individual device being used in multiple pools, regardless of the type of vdev.
See the discussion under
.Nm zpool Cm create .
.Pp
When this property is on, periodic writes to storage occur to show the pool is
in use.
See
.Sy zfs_multihost_interval
in the
.Xr zfs 4
manual page.
In order to enable this property each host must set a unique hostid.
See
.Xr genhostid 1 ,
.Xr zgenhostid 8 ,
and
.Xr spl 4
for additional details.
The default value is
.Sy off .
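.Pp
A minimal sketch of enabling it on a hypothetical shared pool, after each host
has generated its own unique hostid:
.Dl # Nm zgenhostid
.Dl # Nm zpool Cm set Sy multihost Ns = Ns Sy on Ar tank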
.It Sy version Ns = Ns Ar version
The current on-disk version of the pool.
This can be increased, but never decreased.
The preferred method of updating pools is with the
.Nm zpool Cm upgrade
command, though this property can be used when a specific version is needed for
backwards compatibility.
Once feature flags are enabled on a pool this property will no longer have a
value.
.El
diff --git a/sys/contrib/openzfs/man/man8/zfs-load-key.8 b/sys/contrib/openzfs/man/man8/zfs-load-key.8
index ed89b65d7159..b12a79e0150a 100644
--- a/sys/contrib/openzfs/man/man8/zfs-load-key.8
+++ b/sys/contrib/openzfs/man/man8/zfs-load-key.8
@@ -1,301 +1,301 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
.Dd January 13, 2020
.Dt ZFS-LOAD-KEY 8
.Os
.
.Sh NAME
.Nm zfs-load-key
.Nd load, unload, or change encryption key of ZFS dataset
.Sh SYNOPSIS
.Nm zfs
.Cm load-key
.Op Fl nr
.Op Fl L Ar keylocation
.Fl a Ns | Ns Ar filesystem
.Nm zfs
.Cm unload-key
.Op Fl r
.Fl a Ns | Ns Ar filesystem
.Nm zfs
.Cm change-key
.Op Fl l
.Op Fl o Ar keylocation Ns = Ns Ar value
.Op Fl o Ar keyformat Ns = Ns Ar value
.Op Fl o Ar pbkdf2iters Ns = Ns Ar value
.Ar filesystem
.Nm zfs
.Cm change-key
.Fl i
.Op Fl l
.Ar filesystem
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm load-key
.Op Fl nr
.Op Fl L Ar keylocation
.Fl a Ns | Ns Ar filesystem
.Xc
Load the key for
.Ar filesystem ,
allowing it and all children that inherit the
.Sy keylocation
property to be accessed.
The key will be expected in the format specified by the
.Sy keyformat
and location specified by the
.Sy keylocation
property.
Note that if the
.Sy keylocation
is set to
.Sy prompt
the terminal will interactively wait for the key to be entered.
Loading a key will not automatically mount the dataset.
If that functionality is desired,
.Nm zfs Cm mount Fl l
will ask for the key and mount the dataset
.Po
see
.Xr zfs-mount 8
.Pc .
Once the key is loaded the
.Sy keystatus
property will become
.Sy available .
.Bl -tag -width "-r"
.It Fl r
Recursively loads the keys for the specified filesystem and all descendent
encryption roots.
.It Fl a
Loads the keys for all encryption roots in all imported pools.
.It Fl n
Do a dry-run
.Pq Qq No-op
.Cm load-key .
This will cause
.Nm zfs
to simply check that the provided key is correct.
This command may be run even if the key is already loaded.
.It Fl L Ar keylocation
Use
.Ar keylocation
instead of the
.Sy keylocation
property.
This will not change the value of the property on the dataset.
Note that if used with either
.Fl r
or
.Fl a ,
.Ar keylocation
may only be given as
.Sy prompt .
.El
.It Xo
.Nm zfs
.Cm unload-key
.Op Fl r
.Fl a Ns | Ns Ar filesystem
.Xc
Unloads a key from ZFS, removing the ability to access the dataset and all of
its children that inherit the
.Sy keylocation
property.
This requires that the dataset is not currently open or mounted.
Once the key is unloaded the
.Sy keystatus
property will become
.Sy unavailable .
.Bl -tag -width "-r"
.It Fl r
Recursively unloads the keys for the specified filesystem and all descendent
encryption roots.
.It Fl a
Unloads the keys for all encryption roots in all imported pools.
.El
.It Xo
.Nm zfs
.Cm change-key
.Op Fl l
.Op Fl o Ar keylocation Ns = Ns Ar value
.Op Fl o Ar keyformat Ns = Ns Ar value
.Op Fl o Ar pbkdf2iters Ns = Ns Ar value
.Ar filesystem
.Xc
.It Xo
.Nm zfs
.Cm change-key
.Fl i
.Op Fl l
.Ar filesystem
.Xc
Changes the user's key (e.g. a passphrase) used to access a dataset.
This command requires that the existing key for the dataset is already loaded.
This command may also be used to change the
.Sy keylocation ,
.Sy keyformat ,
and
.Sy pbkdf2iters
properties as needed.
If the dataset was not previously an encryption root it will become one.
Alternatively, the
.Fl i
flag may be provided to cause an encryption root to inherit the parent's key
instead.
.Pp
If the user's key is compromised,
.Nm zfs Cm change-key
does not necessarily protect existing or newly-written data from attack.
Newly-written data will continue to be encrypted with the same master key as
the existing data.
The master key is compromised if an attacker obtains a
user key and the corresponding wrapped master key.
Currently,
.Nm zfs Cm change-key
does not overwrite the previous wrapped master key on disk, so it is
accessible via forensic analysis for an indeterminate length of time.
.Pp
In the event of a master key compromise, ideally the drives should be securely
erased to remove all the old data (which is readable using the compromised
master key), a new pool created, and the data copied back.
This can be approximated in place by creating new datasets, copying the data
.Pq e.g. using Nm zfs Cm send | Nm zfs Cm recv ,
and then clearing the free space with
.Nm zpool Cm trim Fl -secure
if supported by your hardware, otherwise
.Nm zpool Cm initialize .
.Bl -tag -width "-r"
.It Fl l
Ensures the key is loaded before attempting to change the key.
-This is effectively equivalent to runnin
+This is effectively equivalent to running
.Nm zfs Cm load-key Ar filesystem ; Nm zfs Cm change-key Ar filesystem
.It Fl o Ar property Ns = Ns Ar value
Allows the user to set encryption key properties
.Pq Sy keyformat , keylocation , No and Sy pbkdf2iters
while changing the key.
This is the only way to alter
.Sy keyformat
and
.Sy pbkdf2iters
after the dataset has been created.
.It Fl i
Indicates that zfs should make
.Ar filesystem
inherit the key of its parent.
Note that this command can only be run on an encryption root
that has an encrypted parent.
.El
.El
.Ss Encryption
Enabling the
.Sy encryption
feature allows for the creation of encrypted filesystems and volumes.
ZFS will encrypt file and volume data, file attributes, ACLs, permission bits,
directory listings, FUID mappings, and
.Sy userused Ns / Ns Sy groupused
data.
ZFS will not encrypt metadata related to the pool structure, including
dataset and snapshot names, dataset hierarchy, properties, file size, file
holes, and deduplication tables (though the deduplicated data itself is
encrypted).
.Pp
Key rotation is managed by ZFS.
Changing the user's key (e.g. a passphrase)
does not require re-encrypting the entire dataset.
Datasets can be scrubbed,
resilvered, renamed, and deleted without the encryption keys being loaded (see the
.Cm load-key
subcommand for more info on key loading).
.Pp
Creating an encrypted dataset requires specifying the
.Sy encryption No and Sy keyformat
properties at creation time, along with an optional
.Sy keylocation No and Sy pbkdf2iters .
After entering an encryption key, the
created dataset will become an encryption root.
Any descendant datasets will
inherit their encryption key from the encryption root by default, meaning that
loading, unloading, or changing the key for the encryption root will implicitly
do the same for all inheriting datasets.
If this inheritance is not desired, simply supply a
.Sy keyformat
when creating the child dataset or use
.Nm zfs Cm change-key
to break an existing relationship, creating a new encryption root on the child.
Note that the child's
.Sy keyformat
may match that of the parent while still creating a new encryption root, and
that changing the
.Sy encryption
property alone does not create a new encryption root; this would simply use a
different cipher suite with the same key as its encryption root.
The one exception is that clones will always use their origin's encryption key.
As a result of this exception, some encryption-related properties
.Pq namely Sy keystatus , keyformat , keylocation , No and Sy pbkdf2iters
do not inherit like other ZFS properties and instead use the value determined
by their encryption root.
Encryption root inheritance can be tracked via the read-only
.Sy encryptionroot
property.
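.Pp
A minimal sketch (dataset names are examples): create an encryption root, then
split an existing child off into its own encryption root:
.Dl # Nm zfs Cm create Fl o Sy encryption Ns = Ns Sy on Fl o Sy keyformat Ns = Ns Sy passphrase Ar pool/secure
.Dl # Nm zfs Cm change-key Fl o Sy keyformat Ns = Ns Sy passphrase Ar pool/secure/child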
.Pp
Encryption changes the behavior of a few ZFS
operations.
Encryption is applied after compression so compression ratios are preserved.
Normally checksums in ZFS are 256 bits long, but for encrypted data
the checksum is 128 bits of the user-chosen checksum and 128 bits of MAC from
the encryption suite, which provides additional protection against maliciously
altered data.
Deduplication is still possible with encryption enabled but for security,
datasets will only deduplicate against themselves, their snapshots,
and their clones.
.Pp
There are a few limitations on encrypted datasets.
Encrypted data cannot be embedded via the
.Sy embedded_data
feature.
Encrypted datasets may not have
.Sy copies Ns = Ns Em 3
since the implementation stores some encryption metadata where the third copy
would normally be.
Since compression is applied before encryption, datasets may
be vulnerable to a CRIME-like attack if applications accessing the data allow for it.
Deduplication with encryption will leak information about which blocks
are equivalent in a dataset and will incur an extra CPU cost for each block written.
.
.Sh SEE ALSO
.Xr zfsprops 7 ,
.Xr zfs-create 8 ,
.Xr zfs-set 8
diff --git a/sys/contrib/openzfs/man/man8/zfs-receive.8 b/sys/contrib/openzfs/man/man8/zfs-receive.8
index d2cec42a8e71..b81bfc507e1f 100644
--- a/sys/contrib/openzfs/man/man8/zfs-receive.8
+++ b/sys/contrib/openzfs/man/man8/zfs-receive.8
@@ -1,400 +1,400 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
.Dd February 16, 2020
.Dt ZFS-RECEIVE 8
.Os
.
.Sh NAME
.Nm zfs-receive
.Nd create snapshot from backup stream
.Sh SYNOPSIS
.Nm zfs
.Cm receive
.Op Fl FhMnsuv
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm zfs
.Cm receive
.Op Fl FhMnsuv
.Op Fl d Ns | Ns Fl e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem
.Nm zfs
.Cm receive
.Fl A
.Ar filesystem Ns | Ns Ar volume
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm receive
.Op Fl FhMnsuv
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Xc
.It Xo
.Nm zfs
.Cm receive
.Op Fl FhMnsuv
.Op Fl d Ns | Ns Fl e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem
.Xc
Creates a snapshot whose contents are as specified in the stream provided on
standard input.
If a full stream is received, then a new file system is created as well.
Streams are created using the
.Nm zfs Cm send
subcommand, which by default creates a full stream.
.Nm zfs Cm recv
can be used as an alias for
.Nm zfs Cm receive .
.Pp
If an incremental stream is received, then the destination file system must
already exist, and its most recent snapshot must match the incremental stream's
source.
For
.Sy zvols ,
the destination device link is destroyed and recreated, which means the
.Sy zvol
cannot be accessed during the
.Cm receive
operation.
.Pp
When a snapshot replication package stream that is generated by using the
.Nm zfs Cm send Fl R
command is received, any snapshots that do not exist on the sending location are
destroyed by using the
.Nm zfs Cm destroy Fl d
command.
.Pp
The ability to send and receive deduplicated send streams has been removed.
However, a deduplicated send stream created with older software can be converted
to a regular (non-deduplicated) stream by using the
.Nm zstream Cm redup
command.
.Pp
If
.Fl o Em property Ns = Ns Ar value
or
.Fl x Em property
is specified, it applies to the effective value of the property throughout
the entire subtree of replicated datasets.
Effective property values will be set
.Pq Fl o
or inherited
.Pq Fl x
on the topmost file system in the replicated subtree.
In descendant datasets, if the
property is set by the send stream, it will be overridden by forcing the
property to be inherited from the topmost file system.
Received properties are retained in spite of being overridden
and may be restored with
.Nm zfs Cm inherit Fl S .
Specifying
.Fl o Sy origin Ns = Ns Em snapshot
is a special case because, even though
.Sy origin
is a read-only property and cannot be set, it is permitted to receive the send
stream as a clone of the given snapshot.
.Pp
Raw encrypted send streams (created with
.Nm zfs Cm send Fl w )
may only be received as is, and cannot be re-encrypted, decrypted, or
recompressed by the receive process.
Unencrypted streams can be received as
encrypted datasets, either through inheritance or by specifying encryption
parameters with the
.Fl o
options.
Note that the
.Sy keylocation
property cannot be overridden to
.Sy prompt
during a receive.
This is because the receive process itself is already using
the standard input for the send stream.
Instead, the property can be overridden after the receive completes.
.Pp
The added security provided by raw sends adds some restrictions to the send
and receive process.
ZFS will not allow a mix of raw receives and non-raw receives.
Specifically, any raw incremental receives that are attempted after
a non-raw receive will fail.
Non-raw receives do not have this restriction and,
therefore, are always possible.
Because of this, it is best practice to always
use either raw sends for their security benefits or non-raw sends for their
flexibility when working with encrypted datasets, but not a combination.
.Pp
The reason for this restriction stems from the inherent restrictions of the
AEAD ciphers that ZFS uses to encrypt data.
When using ZFS native encryption,
each block of data is encrypted against a randomly generated number known as
the "initialization vector" (IV), which is stored in the filesystem metadata.
This number is required by the encryption algorithms whenever the data is to
be decrypted.
Together, all of the IVs provided for all of the blocks in a
given snapshot are collectively called an "IV set".
When ZFS performs a raw send, the IV set is transferred from the source
to the destination in the send stream.
When ZFS performs a non-raw send, the data is decrypted by the source
system and re-encrypted by the destination system, creating a snapshot with
effectively the same data, but a different IV set.
In order for decryption to work after a raw send, ZFS must ensure that
the IV set used on both the source and destination side match.
When an incremental raw receive is performed on
top of an existing snapshot, ZFS will check to confirm that the "from"
snapshot on both the source and destination were using the same IV set,
ensuring the new IV set is consistent.
.Pp
The name of the snapshot
.Pq and file system, if a full stream is received
that this subcommand creates depends on the argument type and the use of the
.Fl d
or
.Fl e
options.
.Pp
If the argument is a snapshot name, the specified
.Ar snapshot
is created.
If the argument is a file system or volume name, a snapshot with the same name
as the sent snapshot is created within the specified
.Ar filesystem
or
.Ar volume .
If neither of the
.Fl d
or
.Fl e
options are specified, the provided target snapshot name is used exactly as
provided.
.Pp
The
.Fl d
and
.Fl e
options cause the file system name of the target snapshot to be determined by
appending a portion of the sent snapshot's name to the specified target
.Ar filesystem .
If the
.Fl d
option is specified, all but the first element of the sent snapshot's file
system path
.Pq usually the pool name
is used and any required intermediate file systems within the specified one are
created.
If the
.Fl e
option is specified, then only the last element of the sent snapshot's file
system name
.Pq i.e. the name of the source file system itself
is used as the target file system name.
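.Pp
As an illustration (all names are hypothetical), receiving a snapshot of
.Ar tank/home/user
into a pool named
.Ar backup
would create
.Ar backup/home/user@snap
with
.Fl d ,
or
.Ar backup/user@snap
with
.Fl e :
.Dl # Nm zfs Cm send Pa tank/home/user@snap | Nm zfs Cm recv Fl d Ar backup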
.Bl -tag -width "-F"
.It Fl F
Force a rollback of the file system to the most recent snapshot before
performing the receive operation.
If receiving an incremental replication stream
.Po for example, one generated by
.Nm zfs Cm send Fl R Op Fl i Ns | Ns Fl I
.Pc ,
destroy snapshots and file systems that do not exist on the sending side.
.It Fl d
Discard the first element of the sent snapshot's file system name, using the
remaining elements to determine the name of the target file system for the new
snapshot as described in the paragraph above.
.It Fl e
Discard all but the last element of the sent snapshot's file system name, using
that element to determine the name of the target file system for the new
snapshot as described in the paragraph above.
.It Fl h
Skip the receive of holds.
There is no effect if holds are not sent.
.It Fl M
Force an unmount of the file system while receiving a snapshot.
This option is not supported on Linux.
.It Fl n
Do not actually receive the stream.
This can be useful in conjunction with the
.Fl v
option to verify the name the receive operation would use.
.It Fl o Sy origin Ns = Ns Ar snapshot
Forces the stream to be received as a clone of the given snapshot.
If the stream is a full send stream, this will create the filesystem
described by the stream as a clone of the specified snapshot.
Which snapshot was specified will not affect the success or failure of the
receive, as long as the snapshot does exist.
If the stream is an incremental send stream, all the normal verification will be
performed.
.It Fl o Em property Ns = Ns Ar value
Sets the specified property as if the command
.Nm zfs Cm set Em property Ns = Ns Ar value
was invoked immediately before the receive.
When receiving a stream from
.Nm zfs Cm send Fl R ,
causes the property to be inherited by all descendant datasets, as though
.Nm zfs Cm inherit Em property
was run on any descendant datasets that have this property set on the
sending system.
.Pp
If the send stream was sent with
.Fl c
then overriding the
.Sy compression
property will have no effect on received data, but the
.Sy compression
property will be set.
To have the data recompressed on receive, remove the
.Fl c
flag from the send stream.
.Pp
Any editable property can be set at receive time.
Set-once properties bound
to the received data, such as
.Sy normalization
and
.Sy casesensitivity ,
cannot be set at receive time even when the datasets are newly created by
.Nm zfs Cm receive .
Additionally, both settable properties
.Sy version
and
.Sy volsize
cannot be set at receive time.
.Pp
The
.Fl o
option may be specified multiple times, for different properties.
An error results if the same property is specified in multiple
.Fl o
or
.Fl x
options.
.Pp
The
.Fl o
option may also be used to override encryption properties upon initial receive.
This allows unencrypted streams to be received as encrypted datasets.
To cause the received dataset (or root dataset of a recursive stream) to be
received as an encryption root, specify encryption properties in the same
manner as is required for
.Nm zfs Cm create .
For instance:
-.Dl # Nm zfs Cm send Pa tank/test@snap1 | Nm zfs Cm recv Fl o Sy encryption Ns = Ns Sy on Fl o keyformat=passphrase Fl o Sy keylocation Ns = Ns Pa file:///path/to/keyfile
+.Dl # Nm zfs Cm send Pa tank/test@snap1 | Nm zfs Cm recv Fl o Sy encryption Ns = Ns Sy on Fl o Sy keyformat Ns = Ns Sy passphrase Fl o Sy keylocation Ns = Ns Pa file:///path/to/keyfile
.Pp
Note that
.Fl o Sy keylocation Ns = Ns Sy prompt
may not be specified here, since the standard input
is already being utilized for the send stream.
Once the receive has completed, you can use
.Nm zfs Cm set
to change this setting after the fact.
Similarly, you can receive a dataset as an encrypted child by specifying
-.Op Fl x Ar encryption
+.Fl x Sy encryption
to force the property to be inherited.
Overriding encryption properties (except for
.Sy keylocation )
is not possible with raw send streams.
.It Fl s
If the receive is interrupted, save the partially received state, rather
than deleting it.
Interruption may be due to premature termination of the stream
.Po e.g. due to network failure or failure of the remote system
if the stream is being read over a network connection
.Pc ,
a checksum error in the stream, termination of the
.Nm zfs Cm receive
process, or unclean shutdown of the system.
.Pp
The receive can be resumed with a stream generated by
.Nm zfs Cm send Fl t Ar token ,
where the
.Ar token
is the value of the
.Sy receive_resume_token
property of the filesystem or volume which is received into.
.Pp
To use this flag, the storage pool must have the
.Sy extensible_dataset
feature enabled.
See
.Xr zpool-features 7
for details on ZFS feature flags.
.It Fl u
The file system associated with the received stream is not mounted.
.It Fl v
Print verbose information about the stream and the time required to perform the
receive operation.
.It Fl x Em property
Ensures that the effective value of the specified property after the
receive is unaffected by the value of that property in the send stream (if any),
as if the property had been excluded from the send stream.
.Pp
If the specified property is not present in the send stream, this option does
nothing.
.Pp
If a received property needs to be overridden, the effective value will be
set or inherited, depending on whether the property is inheritable or not.
.Pp
In the case of an incremental update,
.Fl x
leaves any existing local setting or explicit inheritance unchanged.
.Pp
All
.Fl o
restrictions (e.g. set-once) apply equally to
.Fl x .
.El
.It Xo
.Nm zfs
.Cm receive
.Fl A
.Ar filesystem Ns | Ns Ar volume
.Xc
Abort an interrupted
.Nm zfs Cm receive Fl s ,
deleting its saved partially received state.
.El
.
.Sh SEE ALSO
.Xr zfs-send 8 ,
.Xr zstream 8
diff --git a/sys/contrib/openzfs/man/man8/zfs-share.8 b/sys/contrib/openzfs/man/man8/zfs-share.8
index e30d538814ca..89121ead0958 100644
--- a/sys/contrib/openzfs/man/man8/zfs-share.8
+++ b/sys/contrib/openzfs/man/man8/zfs-share.8
@@ -1,90 +1,100 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2019 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2019 Richard Laager. All rights reserved.
.\" Copyright 2018 Nexenta Systems, Inc.
.\" Copyright 2019 Joyent, Inc.
.\"
-.Dd June 30, 2019
+.Dd May 17, 2021
.Dt ZFS-SHARE 8
.Os
.
.Sh NAME
.Nm zfs-share
.Nd share and unshare ZFS filesystems
.Sh SYNOPSIS
.Nm zfs
.Cm share
+.Op Fl l
.Fl a Ns | Ns Ar filesystem
.Nm zfs
.Cm unshare
.Fl a Ns | Ns Ar filesystem Ns | Ns Ar mountpoint
.
.Sh DESCRIPTION
.Bl -tag -width ""
.It Xo
.Nm zfs
.Cm share
+.Op Fl l
.Fl a Ns | Ns Ar filesystem
.Xc
Shares available ZFS file systems.
.Bl -tag -width "-a"
+.It Fl l
+Load keys for encrypted filesystems as they are being mounted.
+This is equivalent to executing
+.Nm zfs Cm load-key
+on each encryption root before mounting it.
+Note that if a filesystem has
+.Sy keylocation Ns = Ns Sy prompt ,
+this will cause the terminal to interactively block after asking for the key.
.It Fl a
Share all available ZFS file systems.
Invoked automatically as part of the boot process.
.It Ar filesystem
Share the specified filesystem according to the
.Sy sharenfs
and
.Sy sharesmb
properties.
File systems are shared when the
.Sy sharenfs
or
.Sy sharesmb
property is set.
.El
.It Xo
.Nm zfs
.Cm unshare
.Fl a Ns | Ns Ar filesystem Ns | Ns Ar mountpoint
.Xc
Unshares currently shared ZFS file systems.
.Bl -tag -width "-a"
.It Fl a
Unshare all available ZFS file systems.
Invoked automatically as part of the shutdown process.
.It Ar filesystem Ns | Ns Ar mountpoint
Unshare the specified filesystem.
The command can also be given a path to a ZFS file system shared on the system.
.El
.El
.
.Sh SEE ALSO
.Xr exports 5 ,
.Xr smb.conf 5 ,
.Xr zfsprops 7
diff --git a/sys/contrib/openzfs/man/man8/zgenhostid.8 b/sys/contrib/openzfs/man/man8/zgenhostid.8
index 0dcebef73c31..e157578cf0bb 100644
--- a/sys/contrib/openzfs/man/man8/zgenhostid.8
+++ b/sys/contrib/openzfs/man/man8/zgenhostid.8
@@ -1,100 +1,100 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
.\"
.Dd May 26, 2021
.Dt ZGENHOSTID 8
.Os
.
.Sh NAME
.Nm zgenhostid
.Nd generate host ID into /etc/hostid
.Sh SYNOPSIS
.Nm
.Op Fl f
.Op Fl o Ar filename
.Op Ar hostid
.
.Sh DESCRIPTION
Creates
.Pa /etc/hostid
file and stores the host ID in it.
If
.Ar hostid
was provided, validate and store that value.
Otherwise, randomly generate an ID.
.
.Sh OPTIONS
.Bl -tag -width "-o filename"
.It Fl h
Display a summary of the command-line options.
.It Fl f
Allow output overwrite.
.It Fl o Ar filename
Write to
.Pa filename
instead of the default
.Pa /etc/hostid .
.It Ar hostid
Specifies the value to be placed in
.Pa /etc/hostid .
It should be a number with a value between 1 and 2^32-1.
If
.Sy 0 ,
generate a random ID.
This value
.Em must
be unique among your systems.
It
.Em must
be an 8-digit-long hexadecimal number, optionally prefixed by
.Qq 0x .
.El
.
.Sh FILES
.Pa /etc/hostid
.
.Sh EXAMPLES
.Bl -tag -width Bd
.It Generate a random hostid and store it
.Dl # Nm
.It Record the libc-generated hostid in Pa /etc/hostid
.Dl # Nm Qq $ Ns Pq Nm hostid
.It Record a custom hostid Po Ar 0xdeadbeef Pc in Pa /etc/hostid
.Dl # Nm Ar deadbeef
-.It Record a custom hostid Po Ar 0x01234567 Pc in Pa /tmp/hostid No and ovewrite the file if it exists
+.It Record a custom hostid Po Ar 0x01234567 Pc in Pa /tmp/hostid No and overwrite the file if it exists
.Dl # Nm Fl f o Ar /tmp/hostid 0x01234567
.El
.
.Sh SEE ALSO
.Xr genhostid 1 ,
.Xr hostid 1 ,
.Xr sethostid 3 ,
.Xr spl 4
.
.Sh HISTORY
.Nm
emulates the
.Xr genhostid 1
utility and is provided for use on systems which
do not include the utility or do not provide the
.Xr sethostid 3
function.
diff --git a/sys/contrib/openzfs/man/man8/zpool-attach.8 b/sys/contrib/openzfs/man/man8/zpool-attach.8
index 19d8f6ac07ac..9dfa35a107d3 100644
--- a/sys/contrib/openzfs/man/man8/zpool-attach.8
+++ b/sys/contrib/openzfs/man/man8/zpool-attach.8
@@ -1,98 +1,98 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 15, 2020
.Dt ZPOOL-ATTACH 8
.Os
.
.Sh NAME
.Nm zpool-attach
.Nd attach new device to existing ZFS vdev
.Sh SYNOPSIS
.Nm zpool
.Cm attach
.Op Fl fsw
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool device new_device
.
.Sh DESCRIPTION
Attaches
.Ar new_device
to the existing
.Ar device .
The existing device cannot be part of a raidz configuration.
If
.Ar device
is not currently part of a mirrored configuration,
.Ar device
automatically transforms into a two-way mirror of
.Ar device
and
.Ar new_device .
If
.Ar device
is part of a two-way mirror, attaching
.Ar new_device
creates a three-way mirror, and so on.
In either case,
.Ar new_device
begins to resilver immediately and any running scrub is cancelled.
.Bl -tag -width Ds
.It Fl f
Forces use of
.Ar new_device ,
even if it appears to be in use.
Not all devices can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
.Xr zpoolprops 7
manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .
.It Fl s
The
.Ar new_device
is reconstructed sequentially to restore redundancy as quickly as possible.
-Checksums are not verfied during sequential reconstruction so a scrub is
+Checksums are not verified during sequential reconstruction so a scrub is
started when the resilver completes.
Sequential reconstruction is not supported for raidz configurations.
.It Fl w
Waits until
.Ar new_device
has finished resilvering before returning.
.El
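.Pp
As an illustrative sketch using hypothetical pool and device names,
converting a single-disk pool into a two-way mirror might look like:
.Dl # Nm zpool Cm attach Ar tank sda sdb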
.
.Sh SEE ALSO
.Xr zpool-add 8 ,
.Xr zpool-detach 8 ,
.Xr zpool-import 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-online 8 ,
.Xr zpool-replace 8 ,
.Xr zpool-resilver 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-clear.8 b/sys/contrib/openzfs/man/man8/zpool-clear.8
index 6e41566ca6da..0b256b28bd21 100644
--- a/sys/contrib/openzfs/man/man8/zpool-clear.8
+++ b/sys/contrib/openzfs/man/man8/zpool-clear.8
@@ -1,56 +1,59 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 27, 2021
.Dt ZPOOL-CLEAR 8
.Os
.
.Sh NAME
.Nm zpool-clear
.Nd clear device errors in ZFS storage pool
.Sh SYNOPSIS
.Nm zpool
.Cm clear
.Ar pool
.Oo Ar device Oc Ns …
.
.Sh DESCRIPTION
Clears device errors in a pool.
If no arguments are specified, all device errors within the pool are cleared.
If one or more devices are specified, only those errors associated with the
specified device or devices are cleared.
-If
+.Pp
+If the pool was suspended, it will be brought back online provided the
+devices can be accessed.
+Pools with
.Sy multihost
-is enabled and the pool has been suspended, this will not resume I/O.
+enabled which have been suspended cannot be resumed.
While the pool was suspended, it may have been imported on
another host, and resuming I/O could result in pool damage.
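.Pp
As an illustrative sketch using hypothetical pool and device names,
clearing the errors recorded against a single device might look like:
.Dl # Nm zpool Cm clear Ar tank sda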
.
.Sh SEE ALSO
.Xr zdb 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-events.8 b/sys/contrib/openzfs/man/man8/zpool-events.8
index ab1d6ea56213..55f7babae2bd 100644
--- a/sys/contrib/openzfs/man/man8/zpool-events.8
+++ b/sys/contrib/openzfs/man/man8/zpool-events.8
@@ -1,483 +1,483 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 27, 2021
.Dt ZPOOL-EVENTS 8
.Os
.
.Sh NAME
.Nm zpool-events
.Nd list recent events generated by kernel
.Sh SYNOPSIS
.Nm zpool
.Cm events
.Op Fl vHf
.Op Ar pool
.Nm zpool
.Cm events
.Fl c
.
.Sh DESCRIPTION
Lists all recent events generated by the ZFS kernel modules.
These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
For more information about the subclasses and event payloads
that can be generated see
.Sx EVENTS
and the following sections.
.
.Sh OPTIONS
.Bl -tag -compact -width Ds
.It Fl c
Clear all previous events.
.It Fl f
Follow mode.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a
single tab instead of arbitrary space.
.It Fl v
Print the entire payload for each event.
.El
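.Pp
As an illustrative sketch using a hypothetical pool name, following the
event stream and printing the full payload of each event might look like:
.Dl # Nm zpool Cm events Fl vf Ar tank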
.
.Sh EVENTS
-Theese are the different event subclasses.
+These are the different event subclasses.
The full event name would be
.Sy ereport.fs.zfs.\& Ns Em SUBCLASS ,
but only the last part is listed here.
.Pp
.Bl -tag -compact -width "vdev.bad_guid_sum"
.It Sy checksum
Issued when a checksum error has been detected.
.It Sy io
Issued when there is an I/O error in a vdev in the pool.
.It Sy data
Issued when there have been data errors in the pool.
.It Sy deadman
Issued when an I/O request is determined to be "hung"; this can be caused
by lost completion events due to flaky hardware or drivers.
See
.Sy zfs_deadman_failmode
in
.Xr zfs 4
for additional information regarding "hung" I/O detection and configuration.
.It Sy delay
Issued when a completed I/O request exceeds the maximum allowed time
specified by the
.Sy zio_slow_io_ms
module parameter.
This can be an indicator of problems with the underlying storage device.
The number of delay events is ratelimited by the
.Sy zfs_slow_io_events_per_second
module parameter.
.It Sy config
Issued every time a vdev change has been made to the pool.
.It Sy zpool
Issued when a pool cannot be imported.
.It Sy zpool.destroy
Issued when a pool is destroyed.
.It Sy zpool.export
Issued when a pool is exported.
.It Sy zpool.import
Issued when a pool is imported.
.It Sy zpool.reguid
Issued when a REGUID (a new unique identifier for the pool) has been generated.
.It Sy vdev.unknown
Issued when the vdev is unknown.
Such as trying to clear device errors on a vdev that has failed or been kicked
from the system/pool and is no longer available.
.It Sy vdev.open_failed
Issued when a vdev could not be opened (because it didn't exist, for example).
.It Sy vdev.corrupt_data
Issued when corrupt data have been detected on a vdev.
.It Sy vdev.no_replicas
Issued when there are no more replicas to sustain the pool.
This would lead to the pool being
.Em DEGRADED .
.It Sy vdev.bad_guid_sum
Issued when a missing device in the pool has been detected.
.It Sy vdev.too_small
Issued when the system (kernel) has removed a device, and ZFS
notices that the device isn't there any more.
This is usually followed by a
.Sy probe_failure
event.
.It Sy vdev.bad_label
Issued when the label is OK but invalid.
.It Sy vdev.bad_ashift
Issued when the ashift alignment requirement has increased.
.It Sy vdev.remove
Issued when a vdev is detached from a mirror (or a spare detached from a
vdev where it has been used to replace a failed drive - only works if
-the original drive have been readded).
+the original drive has been re-added).
.It Sy vdev.clear
Issued when clearing device errors in a pool.
Such as running
.Nm zpool Cm clear
on a device in the pool.
.It Sy vdev.check
Issued when a check to see if a given vdev could be opened is started.
.It Sy vdev.spare
Issued when a spare has kicked in to replace a failed device.
.It Sy vdev.autoexpand
Issued when a vdev can be automatically expanded.
.It Sy io_failure
Issued when there is an I/O failure in a vdev in the pool.
.It Sy probe_failure
Issued when a probe fails on a vdev.
This would occur if a vdev
has been kicked from the system outside of ZFS (such as when the kernel
has removed the device).
.It Sy log_replay
Issued when the intent log cannot be replayed.
This can occur in the case of a missing or damaged log device.
.It Sy resilver.start
Issued when a resilver is started.
.It Sy resilver.finish
Issued when the running resilver has finished.
.It Sy scrub.start
Issued when a scrub is started on a pool.
.It Sy scrub.finish
Issued when a pool has finished scrubbing.
.It Sy scrub.abort
Issued when a scrub is aborted on a pool.
.It Sy scrub.resume
Issued when a scrub is resumed on a pool.
.It Sy scrub.paused
Issued when a scrub is paused on a pool.
.It Sy bootfs.vdev.attach
.El
.
.Sh PAYLOADS
This is the payload (data, information) that accompanies an
event.
.Pp
For
.Xr zed 8 ,
these are set to uppercase and prefixed with
.Sy ZEVENT_ .
.Pp
.Bl -tag -compact -width "vdev_cksum_errors"
.It Sy pool
Pool name.
.It Sy pool_failmode
Failmode -
.Sy wait ,
.Sy continue ,
or
.Sy panic .
See the
.Sy failmode
property in
.Xr zpoolprops 7
for more information.
.It Sy pool_guid
The GUID of the pool.
.It Sy pool_context
The load state for the pool (0=none, 1=open, 2=import, 3=tryimport, 4=recover,
5=error).
.It Sy vdev_guid
The GUID of the vdev in question (the vdev failing or operated upon with
.Nm zpool Cm clear ,
etc.).
.It Sy vdev_type
Type of vdev -
.Sy disk ,
.Sy file ,
.Sy mirror ,
etc.
See the
.Sy Virtual Devices
section of
.Xr zpoolconcepts 7
for more information on possible values.
.It Sy vdev_path
Full path of the vdev, including any
.Em -partX .
.It Sy vdev_devid
ID of vdev (if any).
.It Sy vdev_fru
Physical FRU location.
.It Sy vdev_state
State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to open, 5=faulted, 6=degraded, 7=healthy).
.It Sy vdev_ashift
The ashift value of the vdev.
.It Sy vdev_complete_ts
The time the last I/O request completed for the specified vdev.
.It Sy vdev_delta_ts
The time since the last I/O request completed for the specified vdev.
.It Sy vdev_spare_paths
List of spares, including full path and any
.Em -partX .
.It Sy vdev_spare_guids
GUID(s) of spares.
.It Sy vdev_read_errors
How many read errors have been detected on the vdev.
.It Sy vdev_write_errors
How many write errors have been detected on the vdev.
.It Sy vdev_cksum_errors
How many checksum errors have been detected on the vdev.
.It Sy parent_guid
GUID of the vdev parent.
.It Sy parent_type
Type of parent.
See
.Sy vdev_type .
.It Sy parent_path
Path of the vdev parent (if any).
.It Sy parent_devid
ID of the vdev parent (if any).
.It Sy zio_objset
The object set number for a given I/O request.
.It Sy zio_object
The object number for a given I/O request.
.It Sy zio_level
The indirect level for the block.
Level 0 is the lowest level and includes data blocks.
Values > 0 indicate metadata blocks at the appropriate level.
.It Sy zio_blkid
The block ID for a given I/O request.
.It Sy zio_err
The error number for a failure when handling a given I/O request,
compatible with
.Xr errno 3
with the value of
.Sy EBADE
used to indicate a ZFS checksum error.
.It Sy zio_offset
The offset in bytes of where to write the I/O request for the specified vdev.
.It Sy zio_size
The size in bytes of the I/O request.
.It Sy zio_flags
The current flags describing how the I/O request should be handled.
See the
.Sy I/O FLAGS
section for the full list of I/O flags.
.It Sy zio_stage
The current stage of the I/O in the pipeline.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_pipeline
The valid pipeline stages for the I/O.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_delay
The time elapsed (in nanoseconds) waiting for the block layer to complete the
I/O request.
Unlike
.Sy zio_delta ,
this does not include any vdev queuing time and is
therefore solely a measure of the block layer performance.
.It Sy zio_timestamp
The time when a given I/O request was submitted.
.It Sy zio_delta
The time required to service a given I/O request.
.It Sy prev_state
The previous state of the vdev.
.It Sy cksum_expected
The expected checksum value for the block.
.It Sy cksum_actual
The actual checksum value for an errant block.
.It Sy cksum_algorithm
Checksum algorithm used.
See
.Xr zfsprops 7
for more information on the available checksum algorithms.
.It Sy cksum_byteswap
Whether or not the data is byteswapped.
.It Sy bad_ranges
.No [\& Ns Ar start , end )
pairs of corruption offsets.
Offsets are always aligned on a 64-bit boundary,
and can include some gaps of non-corruption.
(See
.Sy bad_ranges_min_gap )
.It Sy bad_ranges_min_gap
In order to bound the size of the
.Sy bad_ranges
array, gaps of non-corruption
less than or equal to
.Sy bad_ranges_min_gap
bytes have been merged with
adjacent corruption.
Always at least 8 bytes, since corruption is detected on a 64-bit word basis.
.It Sy bad_range_sets
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits in that range which were clear in the good data and set
in the bad data.
.It Sy bad_range_clears
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits for that range which were set in the good data and clear in
the bad data.
.It Sy bad_set_bits
If this field exists, it is an array of
.Pq Ar bad data No & ~( Ns Ar good data ) ;
that is, the bits set in the bad data which are cleared in the good data.
Each element corresponds to a byte whose offset is in a range in
.Sy bad_ranges ,
and the array is ordered by offset.
Thus, the first element is the first byte in the first
.Sy bad_ranges
range, and the last element is the last byte in the last
.Sy bad_ranges
range.
.It Sy bad_cleared_bits
Like
.Sy bad_set_bits ,
but contains
.Pq Ar good data No & ~( Ns Ar bad data ) ;
that is, the bits set in the good data which are cleared in the bad data.
.It Sy bad_set_histogram
If this field exists, it is an array of counters.
Each entry counts bits set in a particular bit of a big-endian uint64 type.
The first entry counts bits
set in the high-order bit of the first byte, the 9th byte, etc., and the last
entry counts bits set in the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data path,
such as IDE or parallel SCSI.
.It Sy bad_cleared_histogram
If this field exists, it is an array of counters.
Each entry counts bit clears in a particular bit of a big-endian uint64 type.
The first entry counts bit
clears of the high-order bit of the first byte, the 9th byte, etc., and the
last entry counts clears of the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data
path, such as IDE or parallel SCSI.
.El
.
.Sh I/O STAGES
The ZFS I/O pipeline comprises various stages, which are defined below.
The individual stages are used to construct these basic I/O
operations: Read, Write, Free, Claim, and Ioctl.
These stages may be
set on an event to describe the life cycle of a given I/O request.
.Pp
.TS
tab(:);
l l l .
Stage:Bit Mask:Operations
_:_:_
ZIO_STAGE_OPEN:0x00000001:RWFCI
ZIO_STAGE_READ_BP_INIT:0x00000002:R----
ZIO_STAGE_WRITE_BP_INIT:0x00000004:-W---
ZIO_STAGE_FREE_BP_INIT:0x00000008:--F--
ZIO_STAGE_ISSUE_ASYNC:0x00000010:RWF--
ZIO_STAGE_WRITE_COMPRESS:0x00000020:-W---
ZIO_STAGE_ENCRYPT:0x00000040:-W---
ZIO_STAGE_CHECKSUM_GENERATE:0x00000080:-W---
ZIO_STAGE_NOP_WRITE:0x00000100:-W---
ZIO_STAGE_DDT_READ_START:0x00000200:R----
ZIO_STAGE_DDT_READ_DONE:0x00000400:R----
ZIO_STAGE_DDT_WRITE:0x00000800:-W---
ZIO_STAGE_DDT_FREE:0x00001000:--F--
ZIO_STAGE_GANG_ASSEMBLE:0x00002000:RWFC-
ZIO_STAGE_GANG_ISSUE:0x00004000:RWFC-
ZIO_STAGE_DVA_THROTTLE:0x00008000:-W---
ZIO_STAGE_DVA_ALLOCATE:0x00010000:-W---
ZIO_STAGE_DVA_FREE:0x00020000:--F--
ZIO_STAGE_DVA_CLAIM:0x00040000:---C-
ZIO_STAGE_READY:0x00080000:RWFCI
ZIO_STAGE_VDEV_IO_START:0x00100000:RW--I
ZIO_STAGE_VDEV_IO_DONE:0x00200000:RW--I
ZIO_STAGE_VDEV_IO_ASSESS:0x00400000:RW--I
ZIO_STAGE_CHECKSUM_VERIFY:0x00800000:R----
ZIO_STAGE_DONE:0x01000000:RWFCI
.TE
.
.Sh I/O FLAGS
Every I/O request in the pipeline contains a set of flags which describe its
function and are used to govern its behavior.
These flags will be set in an event as a
.Sy zio_flags
payload entry.
.Pp
.TS
tab(:);
l l .
Flag:Bit Mask
_:_
ZIO_FLAG_DONT_AGGREGATE:0x00000001
ZIO_FLAG_IO_REPAIR:0x00000002
ZIO_FLAG_SELF_HEAL:0x00000004
ZIO_FLAG_RESILVER:0x00000008
ZIO_FLAG_SCRUB:0x00000010
ZIO_FLAG_SCAN_THREAD:0x00000020
ZIO_FLAG_PHYSICAL:0x00000040
ZIO_FLAG_CANFAIL:0x00000080
ZIO_FLAG_SPECULATIVE:0x00000100
ZIO_FLAG_CONFIG_WRITER:0x00000200
ZIO_FLAG_DONT_RETRY:0x00000400
ZIO_FLAG_DONT_CACHE:0x00000800
ZIO_FLAG_NODATA:0x00001000
ZIO_FLAG_INDUCE_DAMAGE:0x00002000
ZIO_FLAG_IO_ALLOCATING:0x00004000
ZIO_FLAG_IO_RETRY:0x00008000
ZIO_FLAG_PROBE:0x00010000
ZIO_FLAG_TRYHARD:0x00020000
ZIO_FLAG_OPTIONAL:0x00040000
ZIO_FLAG_DONT_QUEUE:0x00080000
ZIO_FLAG_DONT_PROPAGATE:0x00100000
ZIO_FLAG_IO_BYPASS:0x00200000
ZIO_FLAG_IO_REWRITE:0x00400000
ZIO_FLAG_RAW_COMPRESS:0x00800000
ZIO_FLAG_RAW_ENCRYPT:0x01000000
ZIO_FLAG_GANG_CHILD:0x02000000
ZIO_FLAG_DDT_CHILD:0x04000000
ZIO_FLAG_GODFATHER:0x08000000
ZIO_FLAG_NOPWRITE:0x10000000
ZIO_FLAG_REEXECUTED:0x20000000
ZIO_FLAG_DELEGATED:0x40000000
ZIO_FLAG_FASTWRITE:0x80000000
.TE
.
.Sh SEE ALSO
.Xr zfs 4 ,
.Xr zed 8 ,
.Xr zpool-wait 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-import.8 b/sys/contrib/openzfs/man/man8/zpool-import.8
index 518e3cf1d76a..39b0e17ef586 100644
--- a/sys/contrib/openzfs/man/man8/zpool-import.8
+++ b/sys/contrib/openzfs/man/man8/zpool-import.8
@@ -1,409 +1,410 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd August 9, 2019
.Dt ZPOOL-IMPORT 8
.Os
.
.Sh NAME
.Nm zpool-import
.Nd import ZFS storage pools or list available pools
.Sh SYNOPSIS
.Nm zpool
.Cm import
.Op Fl D
.Oo Fl d Ar dir Ns | Ns Ar device Oc Ns …
.Nm zpool
.Cm import
.Fl a
.Op Fl DflmN
.Op Fl F Op Fl nTX
.Op Fl -rewind-to-checkpoint
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns Ar device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Op Fl R Ar root
.Nm zpool
.Cm import
.Op Fl Dflmt
.Op Fl F Op Fl nTX
.Op Fl -rewind-to-checkpoint
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns Ar device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Op Fl R Ar root
.Op Fl s
.Ar pool Ns | Ns Ar id
.Op Ar newpool
.
.Sh DESCRIPTION
.Bl -tag -width Ds
.It Xo
.Nm zpool
.Cm import
.Op Fl D
.Oo Fl d Ar dir Ns | Ns Ar device Oc Ns …
.Xc
Lists pools available to import.
If the
.Fl d or
.Fl c
options are not specified, this command searches for devices using libblkid
on Linux and geom on
.Fx .
The
.Fl d
option can be specified multiple times, and all directories are searched.
If the device appears to be part of an exported pool, this command displays a
summary of the pool with the name of the pool, a numeric identifier, as well as
the vdev layout and current health of the device for each device or file.
Destroyed pools, pools that were previously destroyed with the
.Nm zpool Cm destroy
command, are not listed unless the
.Fl D
option is specified.
.Pp
The numeric identifier is unique, and can be used instead of the pool name when
multiple exported pools of the same name are available.
.Bl -tag -width Ds
.It Fl c Ar cachefile
Reads configuration from the given
.Ar cachefile
that was created with the
.Sy cachefile
pool property.
This
.Ar cachefile
is used instead of searching for devices.
.It Fl d Ar dir Ns | Ns Ar device
Uses
.Ar device
or searches for devices or files in
.Ar dir .
The
.Fl d
option can be specified multiple times.
.It Fl D
Lists destroyed pools only.
.El
.It Xo
.Nm zpool
.Cm import
.Fl a
.Op Fl DflmN
.Op Fl F Op Fl nTX
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns Ar device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Op Fl R Ar root
.Op Fl s
.Xc
Imports all pools found in the search directories.
Identical to the previous command, except that all pools with a sufficient
number of devices available are imported.
Destroyed pools, pools that were previously destroyed with the
.Nm zpool Cm destroy
command, will not be imported unless the
.Fl D
option is specified.
.Bl -tag -width Ds
.It Fl a
Searches for and imports all pools found.
.It Fl c Ar cachefile
Reads configuration from the given
.Ar cachefile
that was created with the
.Sy cachefile
pool property.
This
.Ar cachefile
is used instead of searching for devices.
.It Fl d Ar dir Ns | Ns Ar device
Uses
.Ar device
or searches for devices or files in
.Ar dir .
The
.Fl d
option can be specified multiple times.
This option is incompatible with the
.Fl c
option.
.It Fl D
Imports destroyed pools only.
The
.Fl f
option is also required.
.It Fl f
Forces import, even if the pool appears to be potentially active.
.It Fl F
Recovery mode for a non-importable pool.
Attempt to return the pool to an importable state by discarding the last few
transactions.
Not all damaged pools can be recovered by using this option.
If successful, the data from the discarded transactions is irretrievably lost.
This option is ignored if the pool is importable or already imported.
.It Fl l
Indicates that this command will request encryption keys for all encrypted
datasets it attempts to mount as it is bringing the pool online.
Note that if any datasets have a
.Sy keylocation
of
.Sy prompt
this command will block waiting for the keys to be entered.
Without this flag
encrypted datasets will be left unavailable until the keys are loaded.
.It Fl m
Allows a pool to import when there is a missing log device.
Recent transactions can be lost because the log device will be discarded.
.It Fl n
Used with the
.Fl F
recovery option.
Determines whether a non-importable pool can be made importable again, but does
not actually perform the pool recovery.
For more details about pool recovery mode, see the
.Fl F
option, above.
.It Fl N
Import the pool without mounting any file systems.
.It Fl o Ar mntopts
Comma-separated list of mount options to use when mounting datasets within the
pool.
See
.Xr zfs 8
for a description of dataset properties and mount options.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property on the imported pool.
See the
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.It Fl R Ar root
Sets the
.Sy cachefile
property to
.Sy none
and the
.Sy altroot
property to
.Ar root .
.It Fl -rewind-to-checkpoint
Rewinds the pool to the checkpointed state.
Once the pool is imported with this flag there is no way to undo the rewind.
All changes and data that were written after the checkpoint are lost!
The only exception is when the
.Sy readonly
mounting option is enabled.
In this case, the checkpointed state of the pool is opened and an
administrator can see how the pool would look if they were
to fully rewind.
.It Fl s
Scan using the default search path; the libblkid cache will not be
consulted.
A custom search path may be specified by setting the
.Sy ZPOOL_IMPORT_PATH
environment variable.
.It Fl X
Used with the
.Fl F
recovery option.
Determines whether extreme measures to find a valid txg should take place.
This allows the pool to
be rolled back to a txg which is no longer guaranteed to be consistent.
Pools imported at an inconsistent txg may contain uncorrectable checksum errors.
For more details about pool recovery mode, see the
.Fl F
option, above.
.Em WARNING :
This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.It Fl T
Specify the txg to use for rollback.
Implies
.Fl FX .
For more details
about pool recovery mode, see the
.Fl X
option, above.
.Em WARNING :
This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.El
.It Xo
.Nm zpool
.Cm import
.Op Fl Dflmt
.Op Fl F Op Fl nTX
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns Ar device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns …
.Op Fl R Ar root
.Op Fl s
.Ar pool Ns | Ns Ar id
.Op Ar newpool
.Xc
Imports a specific pool.
A pool can be identified by its name or the numeric identifier.
If
.Ar newpool
is specified, the pool is imported using the name
.Ar newpool .
Otherwise, it is imported with the same name as its exported name.
.Pp
If a device is removed from a system without running
.Nm zpool Cm export
first, the device appears as potentially active.
It cannot be determined if this was a failed export, or whether the device is
really in use from another host.
To import a pool in this state, the
.Fl f
option is required.
.Bl -tag -width Ds
.It Fl c Ar cachefile
Reads configuration from the given
.Ar cachefile
that was created with the
.Sy cachefile
pool property.
This
.Ar cachefile
is used instead of searching for devices.
.It Fl d Ar dir Ns | Ns Ar device
Uses
.Ar device
or searches for devices or files in
.Ar dir .
The
.Fl d
option can be specified multiple times.
This option is incompatible with the
.Fl c
option.
.It Fl D
Imports a destroyed pool.
The
.Fl f
option is also required.
.It Fl f
Forces import, even if the pool appears to be potentially active.
.It Fl F
Recovery mode for a non-importable pool.
Attempt to return the pool to an importable state by discarding the last few
transactions.
Not all damaged pools can be recovered by using this option.
If successful, the data from the discarded transactions is irretrievably lost.
This option is ignored if the pool is importable or already imported.
.It Fl l
Indicates that this command will request encryption keys for all encrypted
datasets it attempts to mount as it is bringing the pool online.
Note that if any datasets have a
.Sy keylocation
of
.Sy prompt
this command will block waiting for the keys to be entered.
Without this flag
encrypted datasets will be left unavailable until the keys are loaded.
.It Fl m
Allows a pool to import when there is a missing log device.
Recent transactions can be lost because the log device will be discarded.
.It Fl n
Used with the
.Fl F
recovery option.
Determines whether a non-importable pool can be made importable again, but does
not actually perform the pool recovery.
For more details about pool recovery mode, see the
.Fl F
option, above.
.It Fl o Ar mntopts
Comma-separated list of mount options to use when mounting datasets within the
pool.
See
.Xr zfs 8
for a description of dataset properties and mount options.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property on the imported pool.
See the
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.It Fl R Ar root
Sets the
.Sy cachefile
property to
.Sy none
and the
.Sy altroot
property to
.Ar root .
.It Fl s
Scan using the default search path; the libblkid cache will not be
consulted.
A custom search path may be specified by setting the
.Sy ZPOOL_IMPORT_PATH
environment variable.
.It Fl X
Used with the
.Fl F
recovery option.
Determines whether extreme measures to find a valid txg should take place.
This allows the pool to
be rolled back to a txg which is no longer guaranteed to be consistent.
Pools imported at an inconsistent txg may contain uncorrectable
checksum errors.
For more details about pool recovery mode, see the
.Fl F
option, above.
.Em WARNING :
This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.It Fl T
Specify the txg to use for rollback.
Implies
.Fl FX .
For more details
about pool recovery mode, see the
.Fl X
option, above.
-WARNING: This option can be extremely hazardous to the
+.Em WARNING :
+This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.It Fl t
Used with
-.Sy newpool .
+.Ar newpool .
Specifies that
-.Sy newpool
+.Ar newpool
is temporary.
Temporary pool names last until export.
This ensures that the original pool name will be used
in all label updates and is therefore retained upon export.
Will also set
.Fl o Sy cachefile Ns = Ns Sy none
when not explicitly specified.
.El
.El
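.Pp
As an illustrative sketch using hypothetical names, importing an exported
pool under a new name and an alternate root might look like:
.Dl # Nm zpool Cm import Fl R Ar /mnt tank newtank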
.
.Sh SEE ALSO
.Xr zpool-export 8 ,
.Xr zpool-list 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-replace.8 b/sys/contrib/openzfs/man/man8/zpool-replace.8
index 2b2875ed4292..cc61fa3ea37e 100644
--- a/sys/contrib/openzfs/man/man8/zpool-replace.8
+++ b/sys/contrib/openzfs/man/man8/zpool-replace.8
@@ -1,99 +1,99 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 29, 2021
.Dt ZPOOL-REPLACE 8
.Os
.
.Sh NAME
.Nm zpool-replace
.Nd replace one device with another in ZFS storage pool
.Sh SYNOPSIS
.Nm zpool
.Cm replace
.Op Fl fsw
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool Ar device Op Ar new-device
.
.Sh DESCRIPTION
Replaces
.Ar device
with
.Ar new-device .
This is equivalent to attaching
.Ar new-device ,
waiting for it to resilver, and then detaching
.Ar device .
Any in-progress scrub will be cancelled.
.Pp
The size of
.Ar new-device
must be greater than or equal to the minimum size of all the devices in a mirror
or raidz configuration.
.Pp
.Ar new-device
is required if the pool is not redundant.
If
.Ar new-device
is not specified, it defaults to
.Ar device .
This form of replacement is useful after an existing disk has failed and has
been physically replaced.
In this case, the new disk may have the same
.Pa /dev
path as the old device, even though it is actually a different disk.
ZFS recognizes this.
.Bl -tag -width Ds
.It Fl f
Forces use of
.Ar new-device ,
even if it appears to be in use.
Not all devices can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
.Xr zpoolprops 7
manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .
.It Fl s
The
.Ar new-device
is reconstructed sequentially to restore redundancy as quickly as possible.
-Checksums are not verfied during sequential reconstruction so a scrub is
+Checksums are not verified during sequential reconstruction so a scrub is
started when the resilver completes.
Sequential reconstruction is not supported for raidz configurations.
.It Fl w
Waits until the replacement has completed before returning.
.El
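.Pp
As an illustrative sketch using hypothetical pool and device names,
replacing a failed disk and waiting for the resilver to finish might look like:
.Dl # Nm zpool Cm replace Fl w Ar tank sda sdb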
.
.Sh SEE ALSO
.Xr zpool-detach 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-online 8 ,
.Xr zpool-resilver 8
diff --git a/sys/contrib/openzfs/man/man8/zpool-scrub.8 b/sys/contrib/openzfs/man/man8/zpool-scrub.8
index 768f71539290..69ae825b6158 100644
--- a/sys/contrib/openzfs/man/man8/zpool-scrub.8
+++ b/sys/contrib/openzfs/man/man8/zpool-scrub.8
@@ -1,123 +1,144 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018, 2021 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd July 25, 2021
.Dt ZPOOL-SCRUB 8
.Os
.
.Sh NAME
.Nm zpool-scrub
.Nd begin or resume scrub of ZFS storage pools
.Sh SYNOPSIS
.Nm zpool
.Cm scrub
.Op Fl s Ns | Ns Fl p
.Op Fl w
.Ar pool Ns …
.
.Sh DESCRIPTION
Begins a scrub or resumes a paused scrub.
The scrub examines all data in the specified pools to verify that it checksums
correctly.
For replicated
.Pq mirror, raidz, or draid
devices, ZFS automatically repairs any damage discovered during the scrub.
The
.Nm zpool Cm status
command reports the progress of the scrub and summarizes the results of the
scrub upon completion.
.Pp
Scrubbing and resilvering are very similar operations.
The difference is that resilvering only examines data that ZFS knows to be out
of date
.Po
for example, when attaching a new device to a mirror or replacing an existing
device
.Pc ,
whereas scrubbing examines all data to discover silent errors due to hardware
faults or disk failure.
.Pp
Because scrubbing and resilvering are I/O-intensive operations, ZFS only allows
one at a time.
.Pp
A scrub is split into two parts: metadata scanning and block scrubbing.
The metadata scanning sorts blocks into large sequential ranges which can then
be read much more efficiently from disk when issuing the scrub I/O.
.Pp
If a scrub is paused, the
.Nm zpool Cm scrub
resumes it.
If a resilver is in progress, ZFS does not allow a scrub to be started until the
resilver completes.
.Pp
Note that, due to changes in pool data on a live system, it is possible for
scrubs to progress slightly beyond 100% completion.
During this period, no completion time estimate will be provided.
.
.Sh OPTIONS
.Bl -tag -width "-s"
.It Fl s
Stop scrubbing.
.It Fl p
Pause scrubbing.
Scrub pause state and progress are periodically synced to disk.
If the system is restarted or the pool is exported during a paused scrub,
the scrub remains paused until it is resumed, even after import.
Once resumed, the scrub will pick up from the place where it was last
checkpointed to disk.
To resume a paused scrub issue
.Nm zpool Cm scrub
again.
.It Fl w
Wait until scrub has completed before returning.
.El
.Sh EXAMPLES
.Bl -tag -width "Exam"
.It Sy Example 1 : Status of pool with ongoing scrub:
Output:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status
...
scan: scrub in progress since Sun Jul 25 16:07:49 2021
403M scanned at 100M/s, 68.4M issued at 10.0M/s, 405M total
0B repaired, 16.91% done, 00:00:04 to go
...
.Ed
Where:
.Bl -dash -offset indent
.It
Metadata which references 403M of file data has been
scanned at 100M/s, and 68.4M of that file data has been
scrubbed sequentially at 10.0M/s.
.El
.El
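.Pp
As a further illustrative sketch using a hypothetical pool name, pausing an
in-progress scrub and later resuming it might look like:
.Dl # Nm zpool Cm scrub Fl p Ar tank
.Dl # Nm zpool Cm scrub Ar tank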
+.Sh PERIODIC SCRUB
+On machines using systemd, scrub timers can be enabled on a per-pool basis.
+.Nm weekly
+and
+.Nm monthly
+timer units are provided.
+.Bl -tag -width Ds
+.It Xo
+.Xc
+.Nm systemctl
+.Cm enable
+.Cm zfs-scrub-\fIweekly\fB@\fIrpool\fB.timer
+.Cm --now
+.It Xo
+.Xc
+.Nm systemctl
+.Cm enable
+.Cm zfs-scrub-\fImonthly\fB@\fIotherpool\fB.timer
+.Cm --now
+.El
.
.Sh SEE ALSO
+.Xr systemd.timer 5 ,
.Xr zpool-iostat 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-status 8
diff --git a/sys/contrib/openzfs/module/Makefile.in b/sys/contrib/openzfs/module/Makefile.in
index 05c673231cc5..b6338430e812 100644
--- a/sys/contrib/openzfs/module/Makefile.in
+++ b/sys/contrib/openzfs/module/Makefile.in
@@ -1,142 +1,149 @@
include Kbuild
INSTALL_MOD_DIR ?= extra
+INSTALL_MOD_PATH ?= $(DESTDIR)
SUBDIR_TARGETS = icp lua zstd
all: modules
distclean maintainer-clean: clean
install: modules_install
uninstall: modules_uninstall
check:
.PHONY: all distclean maintainer-clean install uninstall check distdir \
modules modules-Linux modules-FreeBSD modules-unknown \
clean clean-Linux clean-FreeBSD \
modules_install modules_install-Linux modules_install-FreeBSD \
modules_uninstall modules_uninstall-Linux modules_uninstall-FreeBSD \
cppcheck cppcheck-Linux cppcheck-FreeBSD
# For FreeBSD, use debug options from ./configure if not overridden.
export WITH_DEBUG ?= @WITH_DEBUG@
export WITH_INVARIANTS ?= @WITH_INVARIANTS@
# Filter out options that FreeBSD make doesn't understand
getflags = ( \
set -- \
$(filter-out --%,$(firstword $(MFLAGS))) \
$(filter -I%,$(MFLAGS)) \
$(filter -j%,$(MFLAGS)); \
fmakeflags=""; \
while getopts :deiI:j:knqrstw flag; do \
case $$flag in \
\?) :;; \
:) if [ $$OPTARG = "j" ]; then \
ncpus=$$(sysctl -n kern.smp.cpus 2>/dev/null || :); \
if [ -n "$$ncpus" ]; then fmakeflags="$$fmakeflags -j$$ncpus"; fi; \
fi;; \
d) fmakeflags="$$fmakeflags -dA";; \
*) fmakeflags="$$fmakeflags -$$flag$$OPTARG";; \
esac; \
done; \
echo $$fmakeflags \
)
FMAKEFLAGS = -C @abs_srcdir@ -f Makefile.bsd $(shell $(getflags))
ifneq (@abs_srcdir@,@abs_builddir@)
FMAKEFLAGS += MAKEOBJDIR=@abs_builddir@
endif
FMAKE = env -u MAKEFLAGS make $(FMAKEFLAGS)
modules-Linux:
- list='$(SUBDIR_TARGETS)'; for targetdir in $$list; do \
- $(MAKE) -C $$targetdir; \
- done
- $(MAKE) -C @LINUX_OBJ@ M=`pwd` @KERNEL_MAKE@ CONFIG_ZFS=m modules
+ list='$(SUBDIR_TARGETS)'; for td in $$list; do $(MAKE) -C $$td; done
+ $(MAKE) -C @LINUX_OBJ@ $(if @KERNEL_CC@,CC=@KERNEL_CC@) \
+ $(if @KERNEL_LD@,LD=@KERNEL_LD@) $(if @KERNEL_LLVM@,LLVM=@KERNEL_LLVM@) \
+ M="$$PWD" @KERNEL_MAKE@ CONFIG_ZFS=m modules
modules-FreeBSD:
+$(FMAKE)
modules-unknown:
@true
modules: modules-@ac_system@
clean-Linux:
@# Only cleanup the kernel build directories when CONFIG_KERNEL
@# is defined. This indicates that kernel modules should be built.
-@CONFIG_KERNEL_TRUE@ $(MAKE) -C @LINUX_OBJ@ M=`pwd` @KERNEL_MAKE@ clean
-
- if [ -f @LINUX_SYMBOLS@ ]; then $(RM) @LINUX_SYMBOLS@; fi
- if [ -f Module.markers ]; then $(RM) Module.markers; fi
+@CONFIG_KERNEL_TRUE@ $(MAKE) -C @LINUX_OBJ@ M="$$PWD" @KERNEL_MAKE@ clean
- find . -name '*.ur-safe' -type f -print | xargs $(RM)
+ $(RM) @LINUX_SYMBOLS@ Module.markers
+ find . -name '*.ur-safe' -type f -delete
clean-FreeBSD:
+$(FMAKE) clean
clean: clean-@ac_system@
modules_install-Linux:
@# Install the kernel modules
- $(MAKE) -C @LINUX_OBJ@ M=`pwd` modules_install \
- INSTALL_MOD_PATH=$(DESTDIR)$(INSTALL_MOD_PATH) \
+ $(MAKE) -C @LINUX_OBJ@ M="$$PWD" modules_install \
+ INSTALL_MOD_PATH=$(INSTALL_MOD_PATH) \
INSTALL_MOD_DIR=$(INSTALL_MOD_DIR) \
KERNELRELEASE=@LINUX_VERSION@
@# Remove extraneous build products when packaging
- kmoddir=$(DESTDIR)$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@; \
+ kmoddir=$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@; \
if [ -n "$(DESTDIR)" ]; then \
- find $$kmoddir -name 'modules.*' | xargs $(RM); \
+ find $$kmoddir -name 'modules.*' -delete; \
fi
- sysmap=$(DESTDIR)$(INSTALL_MOD_PATH)/boot/System.map-@LINUX_VERSION@; \
+ @# Debian ships tiny fake System.map files that are
+ @# syntactically valid but just say
+ @# "if you want system.map go install this package"
+ @# Naturally, depmod is less than amused by this.
+ @# So if we find it missing or with one of these present,
+ @# we check for the alternate path for the System.map
+ sysmap=$(INSTALL_MOD_PATH)/boot/System.map-@LINUX_VERSION@; \
+ { [ -f "$$sysmap" ] && [ $$(wc -l < "$$sysmap") -ge 100 ]; } || \
+ sysmap=$(INSTALL_MOD_PATH)/usr/lib/debug/boot/System.map-@LINUX_VERSION@; \
if [ -f $$sysmap ]; then \
depmod -ae -F $$sysmap @LINUX_VERSION@; \
fi
modules_install-FreeBSD:
@# Install the kernel modules
+$(FMAKE) install
modules_install: modules_install-@ac_system@
modules_uninstall-Linux:
@# Uninstall the kernel modules
- kmoddir=$(DESTDIR)$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@; \
+ kmoddir=$(INSTALL_MOD_PATH)/lib/modules/@LINUX_VERSION@; \
for objdir in $(ZFS_MODULES); do \
$(RM) -R $$kmoddir/$(INSTALL_MOD_DIR)/$$objdir; \
done
modules_uninstall-FreeBSD:
@false
modules_uninstall: modules_uninstall-@ac_system@
cppcheck-Linux:
@CPPCHECK@ -j@CPU_COUNT@ --std=c99 --quiet --force --error-exitcode=2 \
--inline-suppr \
--suppress=unmatchedSuppression \
--suppress=noValidConfiguration \
--enable=warning,information -D_KERNEL \
--include=@LINUX_OBJ@/include/generated/autoconf.h \
--include=@top_srcdir@/zfs_config.h \
--config-exclude=@LINUX_OBJ@/include \
-I @LINUX_OBJ@/include \
-I @top_srcdir@/include/os/linux/kernel \
-I @top_srcdir@/include/os/linux/spl \
-I @top_srcdir@/include/os/linux/zfs \
-I @top_srcdir@/include \
avl icp lua nvpair spl unicode zcommon zfs zstd os/linux
cppcheck-FreeBSD:
@true
cppcheck: cppcheck-@ac_system@
distdir:
(cd @srcdir@ && find $(ZFS_MODULES) os -name '*.[chS]') | \
while read path; do \
mkdir -p $$distdir/$${path%/*}; \
cp @srcdir@/$$path $$distdir/$$path; \
done; \
cp @srcdir@/Makefile.bsd $$distdir/Makefile.bsd
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
index 95a83542fadc..41ceed1dc54c 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/abd_os.c
@@ -1,508 +1,509 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
*/
/*
* See abd.c for a general overview of the arc buffered data (ABD).
*
* Using a large proportion of scattered ABDs decreases ARC fragmentation since
* when we are at the limit of allocatable space, using equal-size chunks will
* allow us to quickly reclaim enough space for a new large allocation (assuming
* it is also scattered).
*
* ABDs are allocated scattered by default unless the caller uses
* abd_alloc_linear() or zfs_abd_scatter_enabled is disabled.
*/
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
typedef struct abd_stats {
kstat_named_t abdstat_struct_size;
kstat_named_t abdstat_scatter_cnt;
kstat_named_t abdstat_scatter_data_size;
kstat_named_t abdstat_scatter_chunk_waste;
kstat_named_t abdstat_linear_cnt;
kstat_named_t abdstat_linear_data_size;
} abd_stats_t;
static abd_stats_t abd_stats = {
/* Amount of memory occupied by all of the abd_t struct allocations */
{ "struct_size", KSTAT_DATA_UINT64 },
/*
* The number of scatter ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset()).
*/
{ "scatter_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all scatter ABDs tracked by scatter_cnt */
{ "scatter_data_size", KSTAT_DATA_UINT64 },
/*
* The amount of space wasted at the end of the last chunk across all
* scatter ABDs tracked by scatter_cnt.
*/
{ "scatter_chunk_waste", KSTAT_DATA_UINT64 },
/*
* The number of linear ABDs which are currently allocated, excluding
* ABDs which don't own their data (for instance the ones which were
* allocated through abd_get_offset() and abd_get_from_buf()). If an
* ABD takes ownership of its buf then it will become tracked.
*/
{ "linear_cnt", KSTAT_DATA_UINT64 },
/* Amount of data stored in all linear ABDs tracked by linear_cnt */
{ "linear_data_size", KSTAT_DATA_UINT64 },
};
struct {
wmsum_t abdstat_struct_size;
wmsum_t abdstat_scatter_cnt;
wmsum_t abdstat_scatter_data_size;
wmsum_t abdstat_scatter_chunk_waste;
wmsum_t abdstat_linear_cnt;
wmsum_t abdstat_linear_data_size;
} abd_sums;
/*
* zfs_abd_scatter_min_size is the minimum allocation size to use scatter
* ABD's for. Smaller allocations will use linear ABD's which use
* zio_[data_]buf_alloc().
*
* Scatter ABD's use at least one page each, so sub-page allocations waste
* some space when allocated as scatter (e.g. 2KB scatter allocation wastes
* half of each page). Using linear ABD's for small allocations means that
* they will be put on slabs which contain many allocations.
*
* Linear ABDs for multi-page allocations are easier to use, and in some cases
* they allow buffer copying to be avoided. But allocation and especially free
* of multi-page linear ABDs are expensive operations due to KVA mapping and
* unmapping, and over time they cause KVA fragmentation.
*/
size_t zfs_abd_scatter_min_size = PAGE_SIZE + 1;
#if defined(_KERNEL)
SYSCTL_DECL(_vfs_zfs);
SYSCTL_INT(_vfs_zfs, OID_AUTO, abd_scatter_enabled, CTLFLAG_RWTUN,
&zfs_abd_scatter_enabled, 0, "Enable scattered ARC data buffers");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, abd_scatter_min_size, CTLFLAG_RWTUN,
&zfs_abd_scatter_min_size, 0, "Minimum size of scatter allocations.");
#endif
kmem_cache_t *abd_chunk_cache;
static kstat_t *abd_ksp;
/*
* We use a scattered SPA_MAXBLOCKSIZE sized ABD whose chunks are
* just a single zero'd page-sized buffer. This allows us to conserve
* memory by only using a single zero buffer for the scatter chunks.
*/
abd_t *abd_zero_scatter = NULL;
static char *abd_zero_buf = NULL;
static uint_t
abd_chunkcnt_for_bytes(size_t size)
{
return ((size + PAGE_MASK) >> PAGE_SHIFT);
}
static inline uint_t
abd_scatter_chunkcnt(abd_t *abd)
{
ASSERT(!abd_is_linear(abd));
return (abd_chunkcnt_for_bytes(
ABD_SCATTER(abd).abd_offset + abd->abd_size));
}
boolean_t
abd_size_alloc_linear(size_t size)
{
return (size < zfs_abd_scatter_min_size ? B_TRUE : B_FALSE);
}
void
abd_update_scatter_stats(abd_t *abd, abd_stats_op_t op)
{
uint_t n = abd_scatter_chunkcnt(abd);
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
int waste = (n << PAGE_SHIFT) - abd->abd_size;
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, waste);
arc_space_consume(waste, ARC_SPACE_ABD_CHUNK_WASTE);
} else {
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste, -waste);
arc_space_return(waste, ARC_SPACE_ABD_CHUNK_WASTE);
}
}
void
abd_update_linear_stats(abd_t *abd, abd_stats_op_t op)
{
ASSERT(op == ABDSTAT_INCR || op == ABDSTAT_DECR);
if (op == ABDSTAT_INCR) {
ABDSTAT_BUMP(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, abd->abd_size);
} else {
ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
}
}
void
abd_verify_scatter(abd_t *abd)
{
uint_t i, n;
/*
* There are no scatter linear pages in FreeBSD, so it is
* an error if the ABD has been marked as a linear page.
*/
ASSERT(!abd_is_linear_page(abd));
ASSERT3U(ABD_SCATTER(abd).abd_offset, <, PAGE_SIZE);
n = abd_scatter_chunkcnt(abd);
for (i = 0; i < n; i++) {
ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);
}
}
void
abd_alloc_chunks(abd_t *abd, size_t size)
{
uint_t i, n;
n = abd_chunkcnt_for_bytes(size);
for (i = 0; i < n; i++) {
ABD_SCATTER(abd).abd_chunks[i] =
kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
}
}
void
abd_free_chunks(abd_t *abd)
{
uint_t i, n;
n = abd_scatter_chunkcnt(abd);
for (i = 0; i < n; i++) {
kmem_cache_free(abd_chunk_cache,
ABD_SCATTER(abd).abd_chunks[i]);
}
}
abd_t *
abd_alloc_struct_impl(size_t size)
{
uint_t chunkcnt = abd_chunkcnt_for_bytes(size);
/*
* In the event we are allocating a gang ABD, the size passed in
* will be 0. We must make sure to set abd_size to the size of an
* ABD struct as opposed to an ABD scatter with 0 chunks. The gang
* ABD struct allocation accounts for an additional 24 bytes over
* a scatter ABD with 0 chunks.
*/
size_t abd_size = MAX(sizeof (abd_t),
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
abd_t *abd = kmem_alloc(abd_size, KM_PUSHPAGE);
ASSERT3P(abd, !=, NULL);
ABDSTAT_INCR(abdstat_struct_size, abd_size);
return (abd);
}
void
abd_free_struct_impl(abd_t *abd)
{
uint_t chunkcnt = abd_is_linear(abd) || abd_is_gang(abd) ? 0 :
abd_scatter_chunkcnt(abd);
ssize_t size = MAX(sizeof (abd_t),
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]));
kmem_free(abd, size);
ABDSTAT_INCR(abdstat_struct_size, -size);
}
/*
* Allocate scatter ABD of size SPA_MAXBLOCKSIZE, where
* each chunk in the scatterlist will be set to abd_zero_buf.
*/
static void
abd_alloc_zero_scatter(void)
{
uint_t i, n;
n = abd_chunkcnt_for_bytes(SPA_MAXBLOCKSIZE);
abd_zero_buf = kmem_cache_alloc(abd_chunk_cache, KM_PUSHPAGE);
+ bzero(abd_zero_buf, PAGE_SIZE);
abd_zero_scatter = abd_alloc_struct(SPA_MAXBLOCKSIZE);
abd_zero_scatter->abd_flags |= ABD_FLAG_OWNER | ABD_FLAG_ZEROS;
abd_zero_scatter->abd_size = SPA_MAXBLOCKSIZE;
ABD_SCATTER(abd_zero_scatter).abd_offset = 0;
for (i = 0; i < n; i++) {
ABD_SCATTER(abd_zero_scatter).abd_chunks[i] =
abd_zero_buf;
}
ABDSTAT_BUMP(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, PAGE_SIZE);
}
static void
abd_free_zero_scatter(void)
{
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)PAGE_SIZE);
abd_free_struct(abd_zero_scatter);
abd_zero_scatter = NULL;
kmem_cache_free(abd_chunk_cache, abd_zero_buf);
}
static int
abd_kstats_update(kstat_t *ksp, int rw)
{
abd_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
as->abdstat_struct_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_struct_size);
as->abdstat_scatter_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_cnt);
as->abdstat_scatter_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_data_size);
as->abdstat_scatter_chunk_waste.value.ui64 =
wmsum_value(&abd_sums.abdstat_scatter_chunk_waste);
as->abdstat_linear_cnt.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_cnt);
as->abdstat_linear_data_size.value.ui64 =
wmsum_value(&abd_sums.abdstat_linear_data_size);
return (0);
}
void
abd_init(void)
{
abd_chunk_cache = kmem_cache_create("abd_chunk", PAGE_SIZE, 0,
NULL, NULL, NULL, NULL, 0, KMC_NODEBUG);
wmsum_init(&abd_sums.abdstat_struct_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_cnt, 0);
wmsum_init(&abd_sums.abdstat_scatter_data_size, 0);
wmsum_init(&abd_sums.abdstat_scatter_chunk_waste, 0);
wmsum_init(&abd_sums.abdstat_linear_cnt, 0);
wmsum_init(&abd_sums.abdstat_linear_data_size, 0);
abd_ksp = kstat_create("zfs", 0, "abdstats", "misc", KSTAT_TYPE_NAMED,
sizeof (abd_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (abd_ksp != NULL) {
abd_ksp->ks_data = &abd_stats;
abd_ksp->ks_update = abd_kstats_update;
kstat_install(abd_ksp);
}
abd_alloc_zero_scatter();
}
void
abd_fini(void)
{
abd_free_zero_scatter();
if (abd_ksp != NULL) {
kstat_delete(abd_ksp);
abd_ksp = NULL;
}
wmsum_fini(&abd_sums.abdstat_struct_size);
wmsum_fini(&abd_sums.abdstat_scatter_cnt);
wmsum_fini(&abd_sums.abdstat_scatter_data_size);
wmsum_fini(&abd_sums.abdstat_scatter_chunk_waste);
wmsum_fini(&abd_sums.abdstat_linear_cnt);
wmsum_fini(&abd_sums.abdstat_linear_data_size);
kmem_cache_destroy(abd_chunk_cache);
abd_chunk_cache = NULL;
}
void
abd_free_linear_page(abd_t *abd)
{
/*
* FreeBSD does not have scatter linear pages
* so there is an error.
*/
VERIFY(0);
}
/*
* If we're going to use this ABD for doing I/O using the block layer, the
* consumer of the ABD data doesn't care if it's scattered or not, and we don't
* plan to store this ABD in memory for a long period of time, we should
* allocate the ABD type that requires the least data copying to do the I/O.
*
* Currently this is linear ABDs, however if ldi_strategy() can ever issue I/Os
* using a scatter/gather list we should switch to that and replace this call
* with vanilla abd_alloc().
*/
abd_t *
abd_alloc_for_io(size_t size, boolean_t is_metadata)
{
return (abd_alloc_linear(size, is_metadata));
}
abd_t *
abd_get_offset_scatter(abd_t *abd, abd_t *sabd, size_t off,
size_t size)
{
abd_verify(sabd);
ASSERT3U(off, <=, sabd->abd_size);
size_t new_offset = ABD_SCATTER(sabd).abd_offset + off;
size_t chunkcnt = abd_chunkcnt_for_bytes(
(new_offset & PAGE_MASK) + size);
ASSERT3U(chunkcnt, <=, abd_scatter_chunkcnt(sabd));
/*
* If an abd struct is provided, it is only the minimum size. If we
* need additional chunks, we need to allocate a new struct.
*/
if (abd != NULL &&
offsetof(abd_t, abd_u.abd_scatter.abd_chunks[chunkcnt]) >
sizeof (abd_t)) {
abd = NULL;
}
if (abd == NULL)
abd = abd_alloc_struct(chunkcnt << PAGE_SHIFT);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
ABD_SCATTER(abd).abd_offset = new_offset & PAGE_MASK;
/* Copy the scatterlist starting at the correct offset */
(void) memcpy(&ABD_SCATTER(abd).abd_chunks,
&ABD_SCATTER(sabd).abd_chunks[new_offset >> PAGE_SHIFT],
chunkcnt * sizeof (void *));
return (abd);
}
/*
* Initialize the abd_iter.
*/
void
abd_iter_init(struct abd_iter *aiter, abd_t *abd)
{
ASSERT(!abd_is_gang(abd));
abd_verify(abd);
aiter->iter_abd = abd;
aiter->iter_pos = 0;
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
}
/*
* This is just a helper function to see if we have exhausted the
* abd_iter and reached the end.
*/
boolean_t
abd_iter_at_end(struct abd_iter *aiter)
{
return (aiter->iter_pos == aiter->iter_abd->abd_size);
}
/*
* Advance the iterator by a certain amount. Cannot be called when a chunk is
* in use. This can be safely called when the aiter has already been exhausted, in
* which case this does nothing.
*/
void
abd_iter_advance(struct abd_iter *aiter, size_t amount)
{
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to advance to, so do nothing */
if (abd_iter_at_end(aiter))
return;
aiter->iter_pos += amount;
}
/*
* Map the current chunk into aiter. This can be safely called when the aiter
* has already been exhausted, in which case this does nothing.
*/
void
abd_iter_map(struct abd_iter *aiter)
{
void *paddr;
ASSERT3P(aiter->iter_mapaddr, ==, NULL);
ASSERT0(aiter->iter_mapsize);
/* There's nothing left to iterate over, so do nothing */
if (abd_iter_at_end(aiter))
return;
abd_t *abd = aiter->iter_abd;
size_t offset = aiter->iter_pos;
if (abd_is_linear(abd)) {
aiter->iter_mapsize = abd->abd_size - offset;
paddr = ABD_LINEAR_BUF(abd);
} else {
offset += ABD_SCATTER(abd).abd_offset;
paddr = ABD_SCATTER(abd).abd_chunks[offset >> PAGE_SHIFT];
offset &= PAGE_MASK;
aiter->iter_mapsize = MIN(PAGE_SIZE - offset,
abd->abd_size - aiter->iter_pos);
}
aiter->iter_mapaddr = (char *)paddr + offset;
}
/*
* Unmap the current chunk from aiter. This can safely be called when the aiter
* is already exhausted, in which case this does nothing.
*/
void
abd_iter_unmap(struct abd_iter *aiter)
{
if (!abd_iter_at_end(aiter)) {
ASSERT3P(aiter->iter_mapaddr, !=, NULL);
ASSERT3U(aiter->iter_mapsize, >, 0);
}
aiter->iter_mapaddr = NULL;
aiter->iter_mapsize = 0;
}
void
abd_cache_reap_now(void)
{
kmem_cache_reap_soon(abd_chunk_cache);
}
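/*
 * Illustrative sketch (not part of the upstream diff): a typical consumer of
 * the iterator above maps one chunk at a time, processes it, then unmaps and
 * advances by the mapped size.  The process_chunk() callback is hypothetical;
 * everything else uses only functions and fields defined in this file.
 */
static void
abd_walk_example(abd_t *abd, void (*process_chunk)(void *buf, size_t len))
{
        struct abd_iter aiter;

        abd_iter_init(&aiter, abd);
        while (!abd_iter_at_end(&aiter)) {
                size_t mapped;

                abd_iter_map(&aiter);
                mapped = aiter.iter_mapsize;
                process_chunk(aiter.iter_mapaddr, mapped);
                abd_iter_unmap(&aiter);
                abd_iter_advance(&aiter, mapped);
        }
}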
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
index fc04a7476154..2d926813637e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/vdev_file.c
@@ -1,355 +1,356 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/file.h>
#include <sys/vdev_file.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/abd.h>
#include <sys/stat.h>
/*
* Virtual device vector for files.
*/
static taskq_t *vdev_file_taskq;
unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;
void
vdev_file_init(void)
{
vdev_file_taskq = taskq_create("z_vdev_file", MAX(max_ncpus, 16),
minclsyspri, max_ncpus, INT_MAX, 0);
}
void
vdev_file_fini(void)
{
taskq_destroy(vdev_file_taskq);
}
static void
vdev_file_hold(vdev_t *vd)
{
ASSERT3P(vd->vdev_path, !=, NULL);
}
static void
vdev_file_rele(vdev_t *vd)
{
ASSERT3P(vd->vdev_path, !=, NULL);
}
static mode_t
vdev_file_open_mode(spa_mode_t spa_mode)
{
mode_t mode = 0;
if ((spa_mode & SPA_MODE_READ) && (spa_mode & SPA_MODE_WRITE)) {
mode = O_RDWR;
} else if (spa_mode & SPA_MODE_READ) {
mode = O_RDONLY;
} else if (spa_mode & SPA_MODE_WRITE) {
mode = O_WRONLY;
}
return (mode | O_LARGEFILE);
}
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_file_t *vf;
zfs_file_t *fp;
zfs_file_attr_t zfa;
int error;
/*
* Rotational optimizations only make sense on block devices.
*/
vd->vdev_nonrot = B_TRUE;
/*
* Allow TRIM on file based vdevs. This may not always be supported,
* since it depends on your kernel version and underlying filesystem
* type, but it is always safe to attempt.
*/
vd->vdev_has_trim = B_TRUE;
/*
* Disable secure TRIM on file based vdevs. There is no way to
* request this behavior from the underlying filesystem.
*/
vd->vdev_has_securetrim = B_FALSE;
/*
* We must have a pathname, and it must be absolute.
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* Reopen the device if it's not currently open. Otherwise,
* just update the physical size of the device.
*/
if (vd->vdev_tsd != NULL) {
ASSERT(vd->vdev_reopening);
vf = vd->vdev_tsd;
goto skip_open;
}
vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);
/*
* We always open the files from the root of the global zone, even if
* we're in a local zone. If the user has gotten to this point, the
* administrator has already decided that the pool should be available
* to local zone users, so the underlying devices should be as well.
*/
ASSERT3P(vd->vdev_path, !=, NULL);
ASSERT(vd->vdev_path[0] == '/');
error = zfs_file_open(vd->vdev_path,
vdev_file_open_mode(spa_mode(vd->vdev_spa)), 0, &fp);
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (error);
}
vf->vf_file = fp;
#ifdef _KERNEL
/*
* Make sure it's a regular file.
*/
if (zfs_file_getattr(fp, &zfa)) {
return (SET_ERROR(ENODEV));
}
if (!S_ISREG(zfa.zfa_mode)) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (SET_ERROR(ENODEV));
}
#endif
skip_open:
error = zfs_file_getattr(vf->vf_file, &zfa);
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (error);
}
*max_psize = *psize = zfa.zfa_size;
*logical_ashift = vdev_file_logical_ashift;
*physical_ashift = vdev_file_physical_ashift;
return (0);
}
static void
vdev_file_close(vdev_t *vd)
{
vdev_file_t *vf = vd->vdev_tsd;
if (vd->vdev_reopening || vf == NULL)
return;
if (vf->vf_file != NULL) {
zfs_file_close(vf->vf_file);
}
vd->vdev_delayed_close = B_FALSE;
kmem_free(vf, sizeof (vdev_file_t));
vd->vdev_tsd = NULL;
}
/*
* Implements the interrupt side for file vdev types. This routine will be
* called when the I/O completes, allowing us to transfer the I/O to the
* interrupt taskqs. For consistency, the code structure mimics disk vdev
* types.
*/
static void
vdev_file_io_intr(zio_t *zio)
{
zio_delay_interrupt(zio);
}
static void
vdev_file_io_strategy(void *arg)
{
zio_t *zio = arg;
vdev_t *vd = zio->io_vd;
vdev_file_t *vf;
void *buf;
ssize_t resid;
loff_t off;
ssize_t size;
int err;
off = zio->io_offset;
size = zio->io_size;
resid = 0;
vf = vd->vdev_tsd;
ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
if (zio->io_type == ZIO_TYPE_READ) {
buf = abd_borrow_buf(zio->io_abd, zio->io_size);
err = zfs_file_pread(vf->vf_file, buf, size, off, &resid);
abd_return_buf_copy(zio->io_abd, buf, size);
} else {
buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);
err = zfs_file_pwrite(vf->vf_file, buf, size, off, &resid);
abd_return_buf(zio->io_abd, buf, size);
}
+ zio->io_error = err;
if (resid != 0 && zio->io_error == 0)
zio->io_error = ENOSPC;
vdev_file_io_intr(zio);
}
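/*
 * Illustrative sketch (not part of the upstream diff): the error convention
 * that the added "zio->io_error = err" line relies on.  zfs_file_pread() and
 * zfs_file_pwrite() return an errno and report untransferred bytes via
 * *resid; a short transfer with no errno is mapped to ENOSPC.  The helper
 * below is hypothetical and only restates the logic of the function above.
 */
static int
vdev_file_map_result(int err, ssize_t resid)
{
        if (err != 0)
                return (err);           /* I/O error from the backing file */
        if (resid != 0)
                return (ENOSPC);        /* short read/write without an errno */
        return (0);
}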
static void
vdev_file_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_file_t *vf = vd->vdev_tsd;
if (zio->io_type == ZIO_TYPE_IOCTL) {
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
zio->io_error = zfs_file_fsync(vf->vf_file,
O_SYNC|O_DSYNC);
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
zio_execute(zio);
return;
} else if (zio->io_type == ZIO_TYPE_TRIM) {
#ifdef notyet
int mode = 0;
ASSERT3U(zio->io_size, !=, 0);
/* XXX FreeBSD has no fallocate routine in file ops */
zio->io_error = zfs_file_fallocate(vf->vf_file,
mode, zio->io_offset, zio->io_size);
#endif
zio->io_error = SET_ERROR(ENOTSUP);
zio_execute(zio);
return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
zio->io_target_timestamp = zio_handle_io_delay(zio);
VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
TQ_SLEEP), !=, 0);
}
-/* ARGSUSED */
static void
vdev_file_io_done(zio_t *zio)
{
+ (void) zio;
}
vdev_ops_t vdev_file_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_file_hold,
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_FILE, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
/*
* From userland we access disks just like files.
*/
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_file_hold,
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
#endif
ZFS_MODULE_PARAM(zfs_vdev_file, vdev_file_, logical_ashift, ULONG, ZMOD_RW,
"Logical ashift for file-based devices");
ZFS_MODULE_PARAM(zfs_vdev_file, vdev_file_, physical_ashift, ULONG, ZMOD_RW,
"Physical ashift for file-based devices");
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index cde40e87698b..3a5c9f8caf0a 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -1,1370 +1,1370 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
*/
/*
* ZFS control directory (a.k.a. ".zfs")
*
* This directory provides a common location for all ZFS meta-objects.
* Currently, this is only the 'snapshot' directory, but this may expand in the
* future. The elements are built using the GFS primitives, as the hierarchy
* does not actually exist on disk.
*
* For 'snapshot', we don't want to have all snapshots always mounted, because
* this would take up a huge amount of space in /etc/mnttab. We have three
* types of objects:
*
* ctldir ------> snapshotdir -------> snapshot
* |
* |
* V
* mounted fs
*
* The 'snapshot' node contains just enough information to lookup '..' and act
* as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we
* perform an automount of the underlying filesystem and return the
* corresponding vnode.
*
* All mounts are handled automatically by the kernel, but unmounts are
* (currently) handled from user land. The main reason is that there is no
* reliable way to auto-unmount the filesystem when it's "no longer in use".
* When the user unmounts a filesystem, we call zfsctl_unmount(), which
* unmounts any snapshots within the snapshot directory.
*
* The '.zfs', '.zfs/snapshot', and all directories created under
* '.zfs/snapshot' (ie: '.zfs/snapshot/<snapname>') are all GFS nodes and
* share the same vfs_t as the head filesystem (what '.zfs' lives under).
*
* File systems mounted on top of the GFS nodes '.zfs/snapshot/<snapname>'
* (ie: snapshots) are ZFS nodes and have their own unique vfs_t.
* However, vnodes within these mounted-on file systems have their v_vfsp
* fields set to the head filesystem to make NFS happy (see
* zfsctl_snapdir_lookup()). We VFS_HOLD the head filesystem's vfs_t
* so that it cannot be freed until all snapshots have been unmounted.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/libkern.h>
#include <sys/dirent.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/dmu.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_deleg.h>
#include <sys/mount.h>
#include <sys/zap.h>
#include <sys/sysproto.h>
#include "zfs_namecheck.h"
#include <sys/kernel.h>
#include <sys/ccompat.h>
/* Common access mode for all virtual directories under the ctldir */
const uint16_t zfsctl_ctldir_mode = S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP |
S_IROTH | S_IXOTH;
/*
* "Synthetic" filesystem implementation.
*/
/*
* Assert that A implies B.
*/
#define KASSERT_IMPLY(A, B, msg) KASSERT(!(A) || (B), (msg));
static MALLOC_DEFINE(M_SFSNODES, "sfs_nodes", "synthetic-fs nodes");
typedef struct sfs_node {
char sn_name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t sn_parent_id;
uint64_t sn_id;
} sfs_node_t;
/*
* Check the parent's ID as well as the node's to account for a chance
* that IDs originating from different domains (snapshot IDs, artificial
* IDs, znode IDs) may clash.
*/
static int
sfs_compare_ids(struct vnode *vp, void *arg)
{
sfs_node_t *n1 = vp->v_data;
sfs_node_t *n2 = arg;
bool equal;
equal = n1->sn_id == n2->sn_id &&
n1->sn_parent_id == n2->sn_parent_id;
/* Zero means equality. */
return (!equal);
}
static int
sfs_vnode_get(const struct mount *mp, int flags, uint64_t parent_id,
uint64_t id, struct vnode **vpp)
{
sfs_node_t search;
int err;
search.sn_id = id;
search.sn_parent_id = parent_id;
err = vfs_hash_get(mp, (uint32_t)id, flags, curthread, vpp,
sfs_compare_ids, &search);
return (err);
}
static int
sfs_vnode_insert(struct vnode *vp, int flags, uint64_t parent_id,
uint64_t id, struct vnode **vpp)
{
int err;
KASSERT(vp->v_data != NULL, ("sfs_vnode_insert with NULL v_data"));
err = vfs_hash_insert(vp, (uint32_t)id, flags, curthread, vpp,
sfs_compare_ids, vp->v_data);
return (err);
}
static void
sfs_vnode_remove(struct vnode *vp)
{
vfs_hash_remove(vp);
}
typedef void sfs_vnode_setup_fn(vnode_t *vp, void *arg);
static int
sfs_vgetx(struct mount *mp, int flags, uint64_t parent_id, uint64_t id,
const char *tag, struct vop_vector *vops,
sfs_vnode_setup_fn setup, void *arg,
struct vnode **vpp)
{
struct vnode *vp;
int error;
error = sfs_vnode_get(mp, flags, parent_id, id, vpp);
if (error != 0 || *vpp != NULL) {
KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
"sfs vnode with no data");
return (error);
}
/* Allocate a new vnode/inode. */
error = getnewvnode(tag, mp, vops, &vp);
if (error != 0) {
*vpp = NULL;
return (error);
}
/*
* Exclusively lock the vnode while it's being constructed.
*/
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
error = insmntque(vp, mp);
if (error != 0) {
*vpp = NULL;
return (error);
}
setup(vp, arg);
error = sfs_vnode_insert(vp, flags, parent_id, id, vpp);
if (error != 0 || *vpp != NULL) {
KASSERT_IMPLY(error == 0, (*vpp)->v_data != NULL,
"sfs vnode with no data");
return (error);
}
*vpp = vp;
return (0);
}
static void
sfs_print_node(sfs_node_t *node)
{
printf("\tname = %s\n", node->sn_name);
printf("\tparent_id = %ju\n", (uintmax_t)node->sn_parent_id);
printf("\tid = %ju\n", (uintmax_t)node->sn_id);
}
static sfs_node_t *
sfs_alloc_node(size_t size, const char *name, uint64_t parent_id, uint64_t id)
{
struct sfs_node *node;
KASSERT(strlen(name) < sizeof (node->sn_name),
("sfs node name is too long"));
KASSERT(size >= sizeof (*node), ("sfs node size is too small"));
node = malloc(size, M_SFSNODES, M_WAITOK | M_ZERO);
strlcpy(node->sn_name, name, sizeof (node->sn_name));
node->sn_parent_id = parent_id;
node->sn_id = id;
return (node);
}
static void
sfs_destroy_node(sfs_node_t *node)
{
free(node, M_SFSNODES);
}
static void *
sfs_reclaim_vnode(vnode_t *vp)
{
void *data;
sfs_vnode_remove(vp);
data = vp->v_data;
vp->v_data = NULL;
return (data);
}
static int
sfs_readdir_common(uint64_t parent_id, uint64_t id, struct vop_readdir_args *ap,
zfs_uio_t *uio, off_t *offp)
{
struct dirent entry;
int error;
/* Reset ncookies for subsequent use of vfs_read_dirent. */
if (ap->a_ncookies != NULL)
*ap->a_ncookies = 0;
if (zfs_uio_resid(uio) < sizeof (entry))
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) < 0)
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) == 0) {
entry.d_fileno = id;
entry.d_type = DT_DIR;
entry.d_name[0] = '.';
entry.d_name[1] = '\0';
entry.d_namlen = 1;
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0)
return (SET_ERROR(error));
}
if (zfs_uio_offset(uio) < sizeof (entry))
return (SET_ERROR(EINVAL));
if (zfs_uio_offset(uio) == sizeof (entry)) {
entry.d_fileno = parent_id;
entry.d_type = DT_DIR;
entry.d_name[0] = '.';
entry.d_name[1] = '.';
entry.d_name[2] = '\0';
entry.d_namlen = 2;
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(uio));
if (error != 0)
return (SET_ERROR(error));
}
if (offp != NULL)
*offp = 2 * sizeof (entry);
return (0);
}
/*
* .zfs inode namespace
*
* We need to generate unique inode numbers for all files and directories
* within the .zfs pseudo-filesystem. We use the following scheme:
*
* ENTRY ZFSCTL_INODE
* .zfs 1
* .zfs/snapshot 2
* .zfs/snapshot/<snap> objectid(snap)
*/
#define ZFSCTL_INO_SNAP(id) (id)
static struct vop_vector zfsctl_ops_root;
static struct vop_vector zfsctl_ops_snapdir;
static struct vop_vector zfsctl_ops_snapshot;
void
zfsctl_init(void)
{
}
void
zfsctl_fini(void)
{
}
boolean_t
zfsctl_is_node(vnode_t *vp)
{
return (vn_matchops(vp, zfsctl_ops_root) ||
vn_matchops(vp, zfsctl_ops_snapdir) ||
vn_matchops(vp, zfsctl_ops_snapshot));
}
typedef struct zfsctl_root {
sfs_node_t node;
sfs_node_t *snapdir;
timestruc_t cmtime;
} zfsctl_root_t;
/*
* Create the '.zfs' directory.
*/
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
zfsctl_root_t *dot_zfs;
sfs_node_t *snapdir;
vnode_t *rvp;
uint64_t crtime[2];
ASSERT3P(zfsvfs->z_ctldir, ==, NULL);
snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
ZFSCTL_INO_SNAPDIR);
dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof (*dot_zfs), ".zfs", 0,
ZFSCTL_INO_ROOT);
dot_zfs->snapdir = snapdir;
VERIFY0(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp));
VERIFY0(sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
&crtime, sizeof (crtime)));
ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
vput(rvp);
zfsvfs->z_ctldir = dot_zfs;
}
/*
* Destroy the '.zfs' directory. Only called when the filesystem is unmounted.
* The nodes must not have any associated vnodes by now, as they should
* already have been vflush-ed.
*/
void
zfsctl_destroy(zfsvfs_t *zfsvfs)
{
sfs_destroy_node(zfsvfs->z_ctldir->snapdir);
sfs_destroy_node((sfs_node_t *)zfsvfs->z_ctldir);
zfsvfs->z_ctldir = NULL;
}
static int
zfsctl_fs_root_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
return (VFS_ROOT(mp, flags, vpp));
}
static void
zfsctl_common_vnode_setup(vnode_t *vp, void *arg)
{
ASSERT_VOP_ELOCKED(vp, __func__);
/* We support shared locking. */
VN_LOCK_ASHARE(vp);
vp->v_type = VDIR;
vp->v_data = arg;
}
static int
zfsctl_root_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
void *node;
int err;
node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir;
err = sfs_vgetx(mp, flags, 0, ZFSCTL_INO_ROOT, "zfs", &zfsctl_ops_root,
zfsctl_common_vnode_setup, node, vpp);
return (err);
}
static int
zfsctl_snapdir_vnode(struct mount *mp, void *arg __unused, int flags,
struct vnode **vpp)
{
void *node;
int err;
node = ((zfsvfs_t *)mp->mnt_data)->z_ctldir->snapdir;
err = sfs_vgetx(mp, flags, ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, "zfs",
&zfsctl_ops_snapdir, zfsctl_common_vnode_setup, node, vpp);
return (err);
}
/*
* Given a root znode, retrieve the associated .zfs directory.
* Add a hold to the vnode and return it.
*/
int
zfsctl_root(zfsvfs_t *zfsvfs, int flags, vnode_t **vpp)
{
int error;
error = zfsctl_root_vnode(zfsvfs->z_vfs, NULL, flags, vpp);
return (error);
}
/*
* Common open routine. Disallow any write access.
*/
static int
zfsctl_common_open(struct vop_open_args *ap)
{
int flags = ap->a_mode;
if (flags & FWRITE)
return (SET_ERROR(EACCES));
return (0);
}
/*
* Common close routine. Nothing to do here.
*/
/* ARGSUSED */
static int
zfsctl_common_close(struct vop_close_args *ap)
{
return (0);
}
/*
* Common access routine. Disallow writes.
*/
static int
zfsctl_common_access(struct vop_access_args *ap)
{
accmode_t accmode = ap->a_accmode;
if (accmode & VWRITE)
return (SET_ERROR(EACCES));
return (0);
}
/*
* Common getattr function. Fill in basic information.
*/
static void
zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
{
timestruc_t now;
sfs_node_t *node;
node = vp->v_data;
vap->va_uid = 0;
vap->va_gid = 0;
vap->va_rdev = 0;
/*
* We are a purely virtual object, so we have no
* blocksize or allocated blocks.
*/
vap->va_blksize = 0;
vap->va_nblocks = 0;
- vap->va_seq = 0;
+ vap->va_gen = 0;
vn_fsid(vp, vap);
vap->va_mode = zfsctl_ctldir_mode;
vap->va_type = VDIR;
/*
* We live in the now (for atime).
*/
gethrestime(&now);
vap->va_atime = now;
/* FreeBSD: Reset chflags(2) flags. */
vap->va_flags = 0;
vap->va_nodeid = node->sn_id;
/* At least '.' and '..'. */
vap->va_nlink = 2;
}
#ifndef _OPENSOLARIS_SYS_VNODE_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfsctl_common_fid(struct vop_fid_args *ap)
{
vnode_t *vp = ap->a_vp;
fid_t *fidp = (void *)ap->a_fid;
sfs_node_t *node = vp->v_data;
uint64_t object = node->sn_id;
zfid_short_t *zfid;
int i;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = SHORT_FID_LEN;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* .zfs nodes always have a generation number of 0 */
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = 0;
return (0);
}
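/*
 * Illustrative sketch (not part of the upstream diff): the inverse of the
 * encoding performed by zfsctl_common_fid() above.  The object id is stored
 * byte-by-byte, least significant byte first, and the generation bytes are
 * always zero for .zfs nodes, so decoding only needs zf_object.
 */
static uint64_t
zfsctl_fid_to_object_example(const zfid_short_t *zfid)
{
        uint64_t object = 0;

        for (int i = 0; i < sizeof (zfid->zf_object); i++)
                object |= (uint64_t)zfid->zf_object[i] << (8 * i);
        return (object);
}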
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfsctl_common_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
(void) sfs_reclaim_vnode(vp);
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_print_args {
struct vnode *a_vp;
};
#endif
static int
zfsctl_common_print(struct vop_print_args *ap)
{
sfs_print_node(ap->a_vp->v_data);
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
/*
* Get root directory attributes.
*/
static int
zfsctl_root_getattr(struct vop_getattr_args *ap)
{
struct vnode *vp = ap->a_vp;
struct vattr *vap = ap->a_vap;
zfsctl_root_t *node = vp->v_data;
zfsctl_common_getattr(vp, vap);
vap->va_ctime = node->cmtime;
vap->va_mtime = vap->va_ctime;
vap->va_birthtime = vap->va_ctime;
vap->va_nlink += 1; /* snapdir */
vap->va_size = vap->va_nlink;
return (0);
}
/*
* When we lookup "." we still can be asked to lock it
* differently, can't we?
*/
static int
zfsctl_relock_dot(vnode_t *dvp, int ltype)
{
vref(dvp);
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/* Relocking for the "." case may have left us with a reclaimed vnode. */
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
}
/*
* Special case the handling of "..".
*/
static int
zfsctl_root_lookup(struct vop_lookup_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *dvp = ap->a_dvp;
vnode_t **vpp = ap->a_vpp;
int flags = ap->a_cnp->cn_flags;
int lkflags = ap->a_cnp->cn_lkflags;
int nameiop = ap->a_cnp->cn_nameiop;
int err;
ASSERT3S(dvp->v_type, ==, VDIR);
if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
if (err == 0)
*vpp = dvp;
} else if ((flags & ISDOTDOT) != 0) {
err = vn_vget_ino_gen(dvp, zfsctl_fs_root_vnode, NULL,
lkflags, vpp);
} else if (strncmp(cnp->cn_nameptr, "snapshot", cnp->cn_namelen) == 0) {
err = zfsctl_snapdir_vnode(dvp->v_mount, NULL, lkflags, vpp);
} else {
err = SET_ERROR(ENOENT);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
zfsctl_root_readdir(struct vop_readdir_args *ap)
{
struct dirent entry;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfsctl_root_t *node = vp->v_data;
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset);
if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0;
return (error);
}
if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL));
CTASSERT(sizeof (node->snapdir->sn_name) <= sizeof (entry.d_name));
entry.d_fileno = node->snapdir->sn_id;
entry.d_type = DT_DIR;
strcpy(entry.d_name, node->snapdir->sn_name);
entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
if (error == ENAMETOOLONG)
error = 0;
return (SET_ERROR(error));
}
if (eofp != NULL)
*eofp = 1;
return (0);
}
static int
zfsctl_root_vptocnp(struct vop_vptocnp_args *ap)
{
static const char dotzfs_name[4] = ".zfs";
vnode_t *dvp;
int error;
if (*ap->a_buflen < sizeof (dotzfs_name))
return (SET_ERROR(ENOMEM));
error = vn_vget_ino_gen(ap->a_vp, zfsctl_fs_root_vnode, NULL,
LK_SHARED, &dvp);
if (error != 0)
return (SET_ERROR(error));
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= sizeof (dotzfs_name);
bcopy(dotzfs_name, ap->a_buf + *ap->a_buflen, sizeof (dotzfs_name));
return (0);
}
static int
zfsctl_common_pathconf(struct vop_pathconf_args *ap)
{
/*
* We care about ACL variables so that user land utilities like ls
* can display them correctly. Since the ctldir's st_dev is set to be
* the same as the parent dataset, we must support all variables that
* it supports.
*/
switch (ap->a_name) {
case _PC_LINK_MAX:
*ap->a_retval = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*ap->a_retval = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*ap->a_retval = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
*ap->a_retval = 0;
return (0);
case _PC_ACL_NFS4:
*ap->a_retval = 1;
return (0);
case _PC_ACL_PATH_MAX:
*ap->a_retval = ACL_MAX_ENTRIES;
return (0);
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
default:
return (vop_stdpathconf(ap));
}
}
/*
* Returns a trivial ACL
*/
static int
zfsctl_common_getacl(struct vop_getacl_args *ap)
{
int i;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
acl_nfs4_sync_acl_from_mode(ap->a_aclp, zfsctl_ctldir_mode, 0);
/*
* acl_nfs4_sync_acl_from_mode assumes that the owner can always modify
* attributes. That is not the case for the ctldir, so we must clear
* those bits. We also must clear ACL_READ_NAMED_ATTRS, because xattrs
* aren't supported by the ctldir.
*/
for (i = 0; i < ap->a_aclp->acl_cnt; i++) {
struct acl_entry *entry;
entry = &(ap->a_aclp->acl_entry[i]);
entry->ae_perm &= ~(ACL_WRITE_ACL | ACL_WRITE_OWNER |
ACL_WRITE_ATTRIBUTES | ACL_WRITE_NAMED_ATTRS |
ACL_READ_NAMED_ATTRS);
}
return (0);
}
static struct vop_vector zfsctl_ops_root = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_ioctl = VOP_EINVAL,
.vop_getattr = zfsctl_root_getattr,
.vop_access = zfsctl_common_access,
.vop_readdir = zfsctl_root_readdir,
.vop_lookup = zfsctl_root_lookup,
.vop_inactive = VOP_NULL,
.vop_reclaim = zfsctl_common_reclaim,
.vop_fid = zfsctl_common_fid,
.vop_print = zfsctl_common_print,
.vop_vptocnp = zfsctl_root_vptocnp,
.vop_pathconf = zfsctl_common_pathconf,
.vop_getacl = zfsctl_common_getacl,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_root);
static int
zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
{
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
dmu_objset_name(os, zname);
if (strlen(zname) + 1 + strlen(name) >= len)
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(zname, "@");
(void) strcat(zname, name);
return (0);
}
static int
zfsctl_snapshot_lookup(vnode_t *vp, const char *name, uint64_t *id)
{
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
int err;
err = dsl_dataset_snap_lookup(dmu_objset_ds(os), name, id);
return (err);
}
/*
* Given a vnode get a root vnode of a filesystem mounted on top of
* the vnode, if any. The root vnode is referenced and locked.
* If no filesystem is mounted then the original vnode remains referenced
* and locked. If any error happens the original vnode is unlocked and
* released.
*/
static int
zfsctl_mounted_here(vnode_t **vpp, int flags)
{
struct mount *mp;
int err;
ASSERT_VOP_LOCKED(*vpp, __func__);
ASSERT3S((*vpp)->v_type, ==, VDIR);
if ((mp = (*vpp)->v_mountedhere) != NULL) {
err = vfs_busy(mp, 0);
KASSERT(err == 0, ("vfs_busy(mp, 0) failed with %d", err));
KASSERT(vrefcnt(*vpp) > 1, ("unreferenced mountpoint"));
vput(*vpp);
err = VFS_ROOT(mp, flags, vpp);
vfs_unbusy(mp);
return (err);
}
return (EJUSTRETURN);
}
typedef struct {
const char *snap_name;
uint64_t snap_id;
} snapshot_setup_arg_t;
static void
zfsctl_snapshot_vnode_setup(vnode_t *vp, void *arg)
{
snapshot_setup_arg_t *ssa = arg;
sfs_node_t *node;
ASSERT_VOP_ELOCKED(vp, __func__);
node = sfs_alloc_node(sizeof (sfs_node_t),
ssa->snap_name, ZFSCTL_INO_SNAPDIR, ssa->snap_id);
zfsctl_common_vnode_setup(vp, node);
/* We have to support recursive locking. */
VN_LOCK_AREC(vp);
}
/*
* Lookup entry point for the 'snapshot' directory. Try to open the
* snapshot if it exists, creating the pseudo filesystem vnode as necessary.
* Perform a mount of the associated dataset on top of the vnode.
* There are four possibilities:
* - the snapshot node and vnode do not exist
* - the snapshot vnode is covered by the mounted snapshot
* - the snapshot vnode is not covered yet; the mount operation is in progress
* - the snapshot vnode is not covered, because the snapshot has been unmounted
* The last two states are transient and should be relatively short-lived.
*/
static int
zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
{
vnode_t *dvp = ap->a_dvp;
vnode_t **vpp = ap->a_vpp;
struct componentname *cnp = ap->a_cnp;
char name[NAME_MAX + 1];
char fullname[ZFS_MAX_DATASET_NAME_LEN];
char *mountpoint;
size_t mountpoint_len;
zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
uint64_t snap_id;
int nameiop = cnp->cn_nameiop;
int lkflags = cnp->cn_lkflags;
int flags = cnp->cn_flags;
int err;
ASSERT3S(dvp->v_type, ==, VDIR);
if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
if (cnp->cn_namelen == 1 && *cnp->cn_nameptr == '.') {
err = zfsctl_relock_dot(dvp, lkflags & LK_TYPE_MASK);
if (err == 0)
*vpp = dvp;
return (err);
}
if (flags & ISDOTDOT) {
err = vn_vget_ino_gen(dvp, zfsctl_root_vnode, NULL, lkflags,
vpp);
return (err);
}
if (cnp->cn_namelen >= sizeof (name))
return (SET_ERROR(ENAMETOOLONG));
strlcpy(name, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
err = zfsctl_snapshot_lookup(dvp, name, &snap_id);
if (err != 0)
return (SET_ERROR(ENOENT));
for (;;) {
snapshot_setup_arg_t ssa;
ssa.snap_name = name;
ssa.snap_id = snap_id;
err = sfs_vgetx(dvp->v_mount, LK_SHARED, ZFSCTL_INO_SNAPDIR,
snap_id, "zfs", &zfsctl_ops_snapshot,
zfsctl_snapshot_vnode_setup, &ssa, vpp);
if (err != 0)
return (err);
/* Check if a new vnode has just been created. */
if (VOP_ISLOCKED(*vpp) == LK_EXCLUSIVE)
break;
/*
* Check if a snapshot is already mounted on top of the vnode.
*/
err = zfsctl_mounted_here(vpp, lkflags);
if (err != EJUSTRETURN)
return (err);
/*
* If the vnode is not covered, then either the mount operation
* is in progress or the snapshot has already been unmounted
* but the vnode hasn't been inactivated and reclaimed yet.
* We can try to re-use the vnode in the latter case.
*/
VI_LOCK(*vpp);
if (((*vpp)->v_iflag & VI_MOUNT) == 0) {
/*
* Upgrade to exclusive lock in order to:
* - avoid race conditions
* - satisfy the contract of mount_snapshot()
*/
err = VOP_LOCK(*vpp, LK_TRYUPGRADE | LK_INTERLOCK);
if (err == 0)
break;
} else {
VI_UNLOCK(*vpp);
}
/*
* In this state we can loop on uncontested locks and starve
* the thread doing the lengthy, non-trivial mount operation.
* So, yield to prevent that from happening.
*/
vput(*vpp);
kern_yield(PRI_USER);
}
VERIFY0(zfsctl_snapshot_zname(dvp, name, sizeof (fullname), fullname));
mountpoint_len = strlen(dvp->v_vfsp->mnt_stat.f_mntonname) +
strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(name) + 1;
mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
(void) snprintf(mountpoint, mountpoint_len,
"%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
dvp->v_vfsp->mnt_stat.f_mntonname, name);
err = mount_snapshot(curthread, vpp, "zfs", mountpoint, fullname, 0);
kmem_free(mountpoint, mountpoint_len);
if (err == 0) {
/*
* Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
*
* This is where we lie about our v_vfsp in order to
* make .zfs/snapshot/<snapname> accessible over NFS
* without requiring manual mounts of <snapname>.
*/
ASSERT3P(VTOZ(*vpp)->z_zfsvfs, !=, zfsvfs);
VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
/* Clear the root flag (set via VFS_ROOT) as well. */
(*vpp)->v_vflag &= ~VV_ROOT;
}
if (err != 0)
*vpp = NULL;
return (err);
}
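/*
 * Illustrative sketch (not part of the upstream diff): the automount target
 * built by zfsctl_snapdir_lookup() above is simply the head filesystem's
 * mountpoint with "/.zfs/snapshot/<snapname>" appended, e.g. snapshot "daily"
 * of a dataset mounted on /tank is mounted on "/tank/.zfs/snapshot/daily".
 * build_snap_mountpoint() is a hypothetical helper mirroring the snprintf()
 * in the function above; the caller frees the result with kmem_free().
 */
static char *
build_snap_mountpoint(const char *mntonname, const char *snapname,
    size_t *lenp)
{
        size_t len = strlen(mntonname) +
            strlen("/" ZFS_CTLDIR_NAME "/snapshot/") + strlen(snapname) + 1;
        char *buf = kmem_alloc(len, KM_SLEEP);

        (void) snprintf(buf, len, "%s/" ZFS_CTLDIR_NAME "/snapshot/%s",
            mntonname, snapname);
        *lenp = len;
        return (buf);
}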
static int
zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
struct dirent entry;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
int error;
zfs_uio_init(&uio, ap->a_uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&uio, &dots_offset);
if (error != 0) {
if (error == ENAMETOOLONG) /* ran out of destination space */
error = 0;
return (error);
}
ZFS_ENTER(zfsvfs);
for (;;) {
uint64_t cookie;
uint64_t id;
cookie = zfs_uio_offset(&uio) - dots_offset;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
snapname, &id, &cookie, NULL);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT) {
if (eofp != NULL)
*eofp = 1;
error = 0;
}
ZFS_EXIT(zfsvfs);
return (error);
}
entry.d_fileno = id;
entry.d_type = DT_DIR;
strcpy(entry.d_name, snapname);
entry.d_namlen = strlen(entry.d_name);
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
if (error == ENAMETOOLONG)
error = 0;
ZFS_EXIT(zfsvfs);
return (SET_ERROR(error));
}
zfs_uio_setoffset(&uio, cookie + dots_offset);
}
/* NOTREACHED */
}
static int
zfsctl_snapdir_getattr(struct vop_getattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
dsl_dataset_t *ds;
uint64_t snap_count;
int err;
ZFS_ENTER(zfsvfs);
ds = dmu_objset_ds(zfsvfs->z_os);
zfsctl_common_getattr(vp, vap);
vap->va_ctime = dmu_objset_snap_cmtime(zfsvfs->z_os);
vap->va_mtime = vap->va_ctime;
vap->va_birthtime = vap->va_ctime;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
err = zap_count(dmu_objset_pool(ds->ds_objset)->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
if (err != 0) {
ZFS_EXIT(zfsvfs);
return (err);
}
vap->va_nlink += snap_count;
}
vap->va_size = vap->va_nlink;
ZFS_EXIT(zfsvfs);
return (0);
}
static struct vop_vector zfsctl_ops_snapdir = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_open = zfsctl_common_open,
.vop_close = zfsctl_common_close,
.vop_getattr = zfsctl_snapdir_getattr,
.vop_access = zfsctl_common_access,
.vop_readdir = zfsctl_snapdir_readdir,
.vop_lookup = zfsctl_snapdir_lookup,
.vop_reclaim = zfsctl_common_reclaim,
.vop_fid = zfsctl_common_fid,
.vop_print = zfsctl_common_print,
.vop_pathconf = zfsctl_common_pathconf,
.vop_getacl = zfsctl_common_getacl,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapdir);
static int
zfsctl_snapshot_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
VERIFY3S(vrecycle(vp), ==, 1);
return (0);
}
static int
zfsctl_snapshot_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
void *data = vp->v_data;
sfs_reclaim_vnode(vp);
sfs_destroy_node(data);
return (0);
}
static int
zfsctl_snapshot_vptocnp(struct vop_vptocnp_args *ap)
{
struct mount *mp;
vnode_t *dvp;
vnode_t *vp;
sfs_node_t *node;
size_t len;
int locked;
int error;
vp = ap->a_vp;
node = vp->v_data;
len = strlen(node->sn_name);
if (*ap->a_buflen < len)
return (SET_ERROR(ENOMEM));
/*
* Prevent unmounting of the snapshot while the vnode lock
* is not held. That is not strictly required, but allows
* us to assert that an uncovered snapshot vnode is never
* "leaked".
*/
mp = vp->v_mountedhere;
if (mp == NULL)
return (SET_ERROR(ENOENT));
error = vfs_busy(mp, 0);
KASSERT(error == 0, ("vfs_busy(mp, 0) failed with %d", error));
/*
* We can vput the vnode as we can now depend on the reference owned
* by the busied mp. But we also need to hold the vnode, because
* the reference may go after vfs_unbusy() which has to be called
* before we can lock the vnode again.
*/
locked = VOP_ISLOCKED(vp);
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(vp);
#else
vhold(vp);
#endif
vput(vp);
/* Look up .zfs/snapshot, our parent. */
error = zfsctl_snapdir_vnode(vp->v_mount, NULL, LK_SHARED, &dvp);
if (error == 0) {
VOP_UNLOCK1(dvp);
*ap->a_vpp = dvp;
*ap->a_buflen -= len;
bcopy(node->sn_name, ap->a_buf + *ap->a_buflen, len);
}
vfs_unbusy(mp);
#if __FreeBSD_version >= 1300045
vget_finish(vp, locked | LK_RETRY, vs);
#else
vget(vp, locked | LK_VNHELD | LK_RETRY, curthread);
#endif
return (error);
}
/*
* These VP's should never see the light of day. They should always
* be covered.
*/
static struct vop_vector zfsctl_ops_snapshot = {
.vop_default = NULL, /* ensure very restricted access */
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
.vop_inactive = zfsctl_snapshot_inactive,
#if __FreeBSD_version >= 1300045
.vop_need_inactive = vop_stdneed_inactive,
#endif
.vop_reclaim = zfsctl_snapshot_reclaim,
.vop_vptocnp = zfsctl_snapshot_vptocnp,
.vop_lock1 = vop_stdlock,
.vop_unlock = vop_stdunlock,
.vop_islocked = vop_stdislocked,
.vop_advlockpurge = vop_stdadvlockpurge, /* called by vgone */
.vop_print = zfsctl_common_print,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfsctl_ops_snapshot);
int
zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
{
zfsvfs_t *zfsvfs __unused = vfsp->vfs_data;
vnode_t *vp;
int error;
ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
*zfsvfsp = NULL;
error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
ZFSCTL_INO_SNAPDIR, objsetid, &vp);
if (error == 0 && vp != NULL) {
/*
* XXX Probably need to at least reference, if not busy, the mp.
*/
if (vp->v_mountedhere != NULL)
*zfsvfsp = vp->v_mountedhere->mnt_data;
vput(vp);
}
if (*zfsvfsp == NULL)
return (SET_ERROR(EINVAL));
return (0);
}
/*
* Unmount any snapshots for the given filesystem. This is called from
* zfs_umount() - if we have a ctldir, then go through and unmount all the
* snapshots.
*/
int
zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct mount *mp;
vnode_t *vp;
uint64_t cookie;
int error;
ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
cookie = 0;
for (;;) {
uint64_t id;
dsl_pool_config_enter(dmu_objset_pool(zfsvfs->z_os), FTAG);
error = dmu_snapshot_list_next(zfsvfs->z_os, sizeof (snapname),
snapname, &id, &cookie, NULL);
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT)
error = 0;
break;
}
for (;;) {
error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
ZFSCTL_INO_SNAPDIR, id, &vp);
if (error != 0 || vp == NULL)
break;
mp = vp->v_mountedhere;
/*
* v_mountedhere being NULL means that the
* (uncovered) vnode is in a transient state
* (mounting or unmounting), so loop until it
* settles down.
*/
if (mp != NULL)
break;
vput(vp);
}
if (error != 0)
break;
if (vp == NULL)
continue; /* no mountpoint, nothing to do */
/*
* The mount-point vnode is kept locked to avoid spurious EBUSY
* from a concurrent umount.
* The vnode lock must have recursive locking enabled.
*/
vfs_ref(mp);
error = dounmount(mp, fflags, curthread);
KASSERT_IMPLY(error == 0, vrefcnt(vp) == 1,
("extra references after unmount"));
vput(vp);
if (error != 0)
break;
}
KASSERT_IMPLY((fflags & MS_FORCE) != 0, error == 0,
("force unmounting failed"));
return (error);
}
int
zfsctl_snapshot_unmount(const char *snapname, int flags __unused)
{
vfs_t *vfsp = NULL;
zfsvfs_t *zfsvfs = NULL;
if (strchr(snapname, '@') == NULL)
return (0);
int err = getzfsvfs(snapname, &zfsvfs);
if (err != 0) {
ASSERT3P(zfsvfs, ==, NULL);
return (0);
}
vfsp = zfsvfs->z_vfs;
ASSERT(!dsl_pool_config_held(dmu_objset_pool(zfsvfs->z_os)));
vfs_ref(vfsp);
vfs_unbusy(vfsp);
return (dounmount(vfsp, MS_FORCE, curthread));
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c
index b46cc046268e..883255bc1901 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_racct.c
@@ -1,55 +1,59 @@
/*
* Copyright (c) 2021 iXsystems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/zfs_racct.h>
#include <sys/racct.h>
void
zfs_racct_read(uint64_t size, uint64_t iops)
{
curthread->td_ru.ru_inblock += iops;
#ifdef RACCT
if (racct_enable) {
PROC_LOCK(curproc);
racct_add_force(curproc, RACCT_READBPS, size);
racct_add_force(curproc, RACCT_READIOPS, iops);
PROC_UNLOCK(curproc);
}
+#else
+ (void) size;
#endif /* RACCT */
}
void
zfs_racct_write(uint64_t size, uint64_t iops)
{
curthread->td_ru.ru_oublock += iops;
#ifdef RACCT
if (racct_enable) {
PROC_LOCK(curproc);
racct_add_force(curproc, RACCT_WRITEBPS, size);
racct_add_force(curproc, RACCT_WRITEIOPS, iops);
PROC_UNLOCK(curproc);
}
+#else
+ (void) size;
#endif /* RACCT */
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
index c534309351e9..cdd762dcbcbf 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vfsops.c
@@ -1,2338 +1,2344 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/acl.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/spa_boot.h>
#include <sys/jail.h>
#include <ufs/ufs/quota.h>
#include <sys/zfs_quota.h>
#include "zfs_comutil.h"
#ifndef MNTK_VMSETSIZE_BUG
#define MNTK_VMSETSIZE_BUG 0
#endif
#ifndef MNTK_NOMSYNC
#define MNTK_NOMSYNC 8
#endif
/* BEGIN CSTYLED */
struct mtx zfs_debug_mtx;
MTX_SYSINIT(zfs_debug_mtx, &zfs_debug_mtx, "zfs_debug", MTX_DEF);
SYSCTL_NODE(_vfs, OID_AUTO, zfs, CTLFLAG_RW, 0, "ZFS file system");
int zfs_super_owner;
SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0,
"File system owner can perform privileged operation on his file systems");
int zfs_debug_level;
SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RWTUN, &zfs_debug_level, 0,
"Debug level");
SYSCTL_NODE(_vfs_zfs, OID_AUTO, version, CTLFLAG_RD, 0, "ZFS versions");
static int zfs_version_acl = ZFS_ACL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, acl, CTLFLAG_RD, &zfs_version_acl, 0,
"ZFS_ACL_VERSION");
static int zfs_version_spa = SPA_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, spa, CTLFLAG_RD, &zfs_version_spa, 0,
"SPA_VERSION");
static int zfs_version_zpl = ZPL_VERSION;
SYSCTL_INT(_vfs_zfs_version, OID_AUTO, zpl, CTLFLAG_RD, &zfs_version_zpl, 0,
"ZPL_VERSION");
/* END CSTYLED */
#if __FreeBSD_version >= 1400018
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg,
bool *mp_busy);
#else
static int zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg);
#endif
static int zfs_mount(vfs_t *vfsp);
static int zfs_umount(vfs_t *vfsp, int fflag);
static int zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp);
static int zfs_statfs(vfs_t *vfsp, struct statfs *statp);
static int zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp);
static int zfs_sync(vfs_t *vfsp, int waitfor);
#if __FreeBSD_version >= 1300098
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors);
#else
static int zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors);
#endif
static int zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp);
static void zfs_freevfs(vfs_t *vfsp);
struct vfsops zfs_vfsops = {
.vfs_mount = zfs_mount,
.vfs_unmount = zfs_umount,
#if __FreeBSD_version >= 1300049
.vfs_root = vfs_cache_root,
.vfs_cachedroot = zfs_root,
#else
.vfs_root = zfs_root,
#endif
.vfs_statfs = zfs_statfs,
.vfs_vget = zfs_vget,
.vfs_sync = zfs_sync,
.vfs_checkexp = zfs_checkexp,
.vfs_fhtovp = zfs_fhtovp,
.vfs_quotactl = zfs_quotactl,
};
VFS_SET(zfs_vfsops, zfs, VFCF_JAIL | VFCF_DELEGADMIN);
/*
* We need to keep a count of active fs's.
* This is necessary to prevent our module
* from being unloaded after a umount -f
*/
static uint32_t zfs_active_fs_count = 0;
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
error = getzfsvfs_impl(os, &zfvp);
if (error != 0)
return (error);
if (zfvp == NULL)
return (ENOENT);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL))
tmp = 1;
break;
case ZFS_PROP_DEVICES:
if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_DEVICES, NULL))
tmp = 1;
break;
case ZFS_PROP_EXEC:
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL))
tmp = 1;
break;
case ZFS_PROP_SETUID:
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL))
tmp = 1;
break;
case ZFS_PROP_READONLY:
if (vfs_optionisset(vfsp, MNTOPT_RW, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
tmp = 1;
break;
case ZFS_PROP_XATTR:
if (zfvp->z_flags & ZSB_XATTR)
tmp = zfvp->z_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL))
tmp = 0;
if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
tmp = 1;
break;
default:
vfs_unbusy(vfsp);
return (ENOENT);
}
vfs_unbusy(vfsp);
if (tmp != *val) {
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
static int
zfs_getquota(zfsvfs_t *zfsvfs, uid_t id, int isgroup, struct dqblk64 *dqp)
{
int error = 0;
char buf[32];
uint64_t usedobj, quotaobj;
uint64_t quota, used = 0;
timespec_t now;
usedobj = isgroup ? DMU_GROUPUSED_OBJECT : DMU_USERUSED_OBJECT;
quotaobj = isgroup ? zfsvfs->z_groupquota_obj : zfsvfs->z_userquota_obj;
if (quotaobj == 0 || zfsvfs->z_replay) {
error = ENOENT;
goto done;
}
(void) sprintf(buf, "%llx", (longlong_t)id);
if ((error = zap_lookup(zfsvfs->z_os, quotaobj,
buf, sizeof (quota), 1, &quota)) != 0) {
dprintf("%s(%d): quotaobj lookup failed\n",
__FUNCTION__, __LINE__);
goto done;
}
/*
* quota(8) uses bsoftlimit as "quota", and hardlimit as "limit".
* So we set them to be the same.
*/
dqp->dqb_bsoftlimit = dqp->dqb_bhardlimit = btodb(quota);
error = zap_lookup(zfsvfs->z_os, usedobj, buf, sizeof (used), 1, &used);
if (error && error != ENOENT) {
dprintf("%s(%d): usedobj failed; %d\n",
__FUNCTION__, __LINE__, error);
goto done;
}
dqp->dqb_curblocks = btodb(used);
dqp->dqb_ihardlimit = dqp->dqb_isoftlimit = 0;
vfs_timestamp(&now);
/*
* Setting this to 0 causes FreeBSD quota(8) to print
* the number of days since the epoch, which isn't
* particularly useful.
*/
dqp->dqb_btime = dqp->dqb_itime = now.tv_sec;
done:
return (error);
}
static int
#if __FreeBSD_version >= 1400018
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg, bool *mp_busy)
#else
zfs_quotactl(vfs_t *vfsp, int cmds, uid_t id, void *arg)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
struct thread *td;
int cmd, type, error = 0;
int bitsize;
zfs_userquota_prop_t quota_type;
struct dqblk64 dqblk = { 0 };
td = curthread;
cmd = cmds >> SUBCMDSHIFT;
type = cmds & SUBCMDMASK;
ZFS_ENTER(zfsvfs);
if (id == -1) {
switch (type) {
case USRQUOTA:
id = td->td_ucred->cr_ruid;
break;
case GRPQUOTA:
id = td->td_ucred->cr_rgid;
break;
default:
error = EINVAL;
#if __FreeBSD_version < 1400018
if (cmd == Q_QUOTAON || cmd == Q_QUOTAOFF)
vfs_unbusy(vfsp);
#endif
goto done;
}
}
/*
* Map BSD type to:
* ZFS_PROP_USERUSED,
* ZFS_PROP_USERQUOTA,
* ZFS_PROP_GROUPUSED,
* ZFS_PROP_GROUPQUOTA
*/
switch (cmd) {
case Q_SETQUOTA:
case Q_SETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERQUOTA;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPQUOTA;
else
error = EINVAL;
break;
case Q_GETQUOTA:
case Q_GETQUOTA32:
if (type == USRQUOTA)
quota_type = ZFS_PROP_USERUSED;
else if (type == GRPQUOTA)
quota_type = ZFS_PROP_GROUPUSED;
else
error = EINVAL;
break;
}
/*
* Depending on the cmd, we may need to get
* the ruid and domain (see fuidstr_to_sid?),
* the fuid (how?), or other information.
* Create fuid using zfs_fuid_create(zfsvfs, id,
* ZFS_OWNER or ZFS_GROUP, cr, &fuidp)?
* I think I can use just the id?
*
* Look at zfs_id_overquota() to look up a quota.
* zap_lookup(something, quotaobj, fuidstring,
* sizeof (long long), 1, &quota)
*
* See zfs_set_userquota() to set a quota.
*/
if ((uint32_t)type >= MAXQUOTAS) {
error = EINVAL;
goto done;
}
switch (cmd) {
case Q_GETQUOTASIZE:
bitsize = 64;
error = copyout(&bitsize, arg, sizeof (int));
break;
case Q_QUOTAON:
// As far as I can tell, you can't turn quotas on or off on zfs
error = 0;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_QUOTAOFF:
error = ENOTSUP;
#if __FreeBSD_version < 1400018
vfs_unbusy(vfsp);
#endif
break;
case Q_SETQUOTA:
error = copyin(arg, &dqblk, sizeof (dqblk));
if (error == 0)
error = zfs_set_userquota(zfsvfs, quota_type,
"", id, dbtob(dqblk.dqb_bhardlimit));
break;
case Q_GETQUOTA:
error = zfs_getquota(zfsvfs, id, type == GRPQUOTA, &dqblk);
if (error == 0)
error = copyout(&dqblk, arg, sizeof (dqblk));
break;
default:
error = EINVAL;
break;
}
done:
ZFS_EXIT(zfsvfs);
return (error);
}
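/*
 * Illustrative sketch (not part of the upstream diff): from userland the
 * Q_GETQUOTA path above is reached through quotactl(2) on the dataset's
 * mountpoint.  The helper below is hypothetical; it assumes the standard
 * FreeBSD quotactl(2) interface and the struct dqblk from <ufs/ufs/quota.h>.
 *
 *	#include <sys/types.h>
 *	#include <ufs/ufs/quota.h>
 *	#include <unistd.h>
 *
 *	static int
 *	query_zfs_userquota(const char *mntpoint, uid_t uid, struct dqblk *dq)
 *	{
 *		return (quotactl(mntpoint, QCMD(Q_GETQUOTA, USRQUOTA),
 *		    uid, dq));
 *	}
 */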
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY));
}
/*ARGSUSED*/
static int
zfs_sync(vfs_t *vfsp, int waitfor)
{
/*
* Data integrity is job one. We don't want a compromised kernel
* writing to the storage pool, so we never sync during panic.
*/
if (panicstr)
return (0);
/*
* Ignore the system syncher. ZFS already commits async data
* at zfs_txg_timeout intervals.
*/
if (waitfor == MNT_LAZY)
return (0);
if (vfsp != NULL) {
/*
* Sync a specific filesystem.
*/
zfsvfs_t *zfsvfs = vfsp->vfs_data;
dsl_pool_t *dp;
int error;
error = vfs_stdsync(vfsp, waitfor);
if (error != 0)
return (error);
ZFS_ENTER(zfsvfs);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (rebooting && spa_suspended(dp->dp_spa)) {
ZFS_EXIT(zfsvfs);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
ZFS_EXIT(zfsvfs);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(8). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == TRUE) {
zfsvfs->z_atime = TRUE;
zfsvfs->z_vfs->vfs_flag &= ~MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_ATIME, NULL, 0);
} else {
zfsvfs->z_atime = FALSE;
zfsvfs->z_vfs->vfs_flag |= MNT_NOATIME;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_ATIME);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOATIME, NULL, 0);
}
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
zfsvfs->z_vfs->mnt_stat.f_iosize = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval) {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RW);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RO, NULL, 0);
} else {
/* XXX locking on vfs_flag? */
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_RO);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_RW, NULL, 0);
}
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_SETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOSETUID;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOSETUID);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_SETUID, NULL, 0);
}
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
zfsvfs->z_vfs->vfs_flag |= VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_EXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC, NULL, 0);
} else {
zfsvfs->z_vfs->vfs_flag &= ~VFS_NOEXEC;
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NOEXEC);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_EXEC, NULL, 0);
}
}
/*
* The nbmand mount option can only be changed at mount time.
* We can't allow it to be toggled on a live file system, or incorrect
* behavior may be seen by CIFS clients.
*
* This property isn't registered via dsl_prop_register(), but this callback
* will be called when a file system is first mounted.
*/
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == FALSE) {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND, NULL, 0);
} else {
vfs_clearmntopt(zfsvfs->z_vfs, MNTOPT_NONBMAND);
vfs_setmntopt(zfsvfs->z_vfs, MNTOPT_NBMAND, NULL, 0);
}
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_show_ctldir = newval;
}
static void
vscan_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_vscan = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_inherit = newval;
}
static void
acl_type_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_type = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
uint64_t nbmand;
boolean_t readonly = B_FALSE;
boolean_t do_readonly = B_FALSE;
boolean_t setuid = B_FALSE;
boolean_t do_setuid = B_FALSE;
boolean_t exec = B_FALSE;
boolean_t do_exec = B_FALSE;
boolean_t xattr = B_FALSE;
boolean_t atime = B_FALSE;
boolean_t do_atime = B_FALSE;
boolean_t do_xattr = B_FALSE;
int error = 0;
ASSERT3P(vfsp, !=, NULL);
zfsvfs = vfsp->vfs_data;
ASSERT3P(zfsvfs, !=, NULL);
os = zfsvfs->z_os;
/*
* This function can be called for a snapshot when we update the
* snapshot's mount point, which isn't really supported.
*/
if (dmu_objset_is_snapshot(os))
return (EOPNOTSUPP);
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (vfs_optionisset(vfsp, MNTOPT_RO, NULL) ||
!spa_writeable(dmu_objset_spa(os))) {
readonly = B_TRUE;
do_readonly = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_RW, NULL)) {
readonly = B_FALSE;
do_readonly = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
setuid = B_FALSE;
do_setuid = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SETUID, NULL)) {
setuid = B_TRUE;
do_setuid = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) {
exec = B_FALSE;
do_exec = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_EXEC, NULL)) {
exec = B_TRUE;
do_exec = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_OFF;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_DIRXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_DIR;
do_xattr = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_SAXATTR, NULL)) {
zfsvfs->z_xattr = xattr = ZFS_XATTR_SA;
do_xattr = B_TRUE;
}
if (vfs_optionisset(vfsp, MNTOPT_NOATIME, NULL)) {
atime = B_FALSE;
do_atime = B_TRUE;
} else if (vfs_optionisset(vfsp, MNTOPT_ATIME, NULL)) {
atime = B_TRUE;
do_atime = B_TRUE;
}
/*
* We need to enter pool configuration here, so that we can use
* dsl_prop_get_int_ds() to handle the special nbmand property below.
* dsl_prop_get_integer() cannot be used, because it has to acquire
* spa_namespace_lock and we cannot do that because we already hold
* z_teardown_lock. The problem is that spa_write_cachefile() is called
* with spa_namespace_lock held and the function calls ZFS vnode
* operations to write the cache file and thus z_teardown_lock is
* acquired after spa_namespace_lock.
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
/*
* nbmand is a special property. It can only be changed at
* mount time.
*
* This is weird, but it is documented to only be changeable
* at mount time.
*/
if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
nbmand = B_FALSE;
} else if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) {
nbmand = B_TRUE;
} else if ((error = dsl_prop_get_int_ds(ds, "nbmand", &nbmand) != 0)) {
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
return (error);
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acl_type_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (do_readonly)
readonly_changed_cb(zfsvfs, readonly);
if (do_setuid)
setuid_changed_cb(zfsvfs, setuid);
if (do_exec)
exec_changed_cb(zfsvfs, exec);
if (do_xattr)
xattr_changed_cb(zfsvfs, xattr);
if (do_atime)
atime_changed_cb(zfsvfs, atime);
nbmand_changed_cb(zfsvfs, nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printf("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val);
if (error != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if (error == 0 && val == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
}
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT3U(zfsvfs->z_root, !=, 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
/*
* Only use the name cache if we are looking for a
* name on a file system that does not require normalization
* or case folding. We can also look there if we happen to be
* on a non-normalizing, mixed sensitivity file system IF we
* are looking for the exact name (which is always the case on
* FreeBSD).
*/
zfsvfs->z_use_namecache = !zfsvfs->z_norm ||
((zfsvfs->z_case == ZFS_CASE_MIXED) &&
!(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER));
return (0);
}
taskq_t *zfsvfs_taskq;
static void
zfsvfs_task_unlinked_drain(void *context, int pending __unused)
{
zfs_unlinked_drain((zfsvfs_t *)context);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
/*
* XXX: Fix struct statfs so this isn't necessary!
*
* The 'osname' is used as the filesystem's special node, which means
* it must fit in statfs.f_mntfromname, or else it can't be
* enumerated, so libzfs_mnttab_find() returns NULL, which causes
* 'zfs unmount' to think it's not mounted when it is.
*/
if (strlen(osname) >= MNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs,
&os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
return (error);
}
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
TASK_INIT(&zfsvfs->z_unlinked_drain_task, 0,
zfsvfs_task_unlinked_drain, zfsvfs);
ZFS_TEARDOWN_INIT(zfsvfs);
ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
for (int i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
*zfvp = NULL;
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if (!(zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
dmu_objset_incompatible_encryption_version(zfsvfs->z_os))
return (SET_ERROR(EROFS));
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
boolean_t readonly;
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
readonly = zfsvfs->z_vfs->vfs_flag & VFS_RDONLY;
if (readonly != 0) {
zfsvfs->z_vfs->vfs_flag &= ~VFS_RDONLY;
} else {
dsl_dir_t *dd;
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
(u_longlong_t)zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
boolean_t use_nc = zfsvfs->z_use_namecache;
zfsvfs->z_use_namecache = B_FALSE;
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
zfsvfs->z_use_namecache = use_nc;
}
}
/* restore readonly bit */
if (readonly != 0)
zfsvfs->z_vfs->vfs_flag |= VFS_RDONLY;
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
ASSERT3U(zfsvfs->z_nr_znodes, ==, 0);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_destroy(&zfsvfs->z_hold_mtx[i]);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
if (zfsvfs->z_vfs) {
if (zfsvfs->z_use_fuids) {
vfs_set_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
vfs_set_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
vfs_set_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
vfs_set_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
} else {
vfs_clear_feature(zfsvfs->z_vfs, VFSFT_XVATTR);
vfs_clear_feature(zfsvfs->z_vfs, VFSFT_SYSATTR_VIEWS);
vfs_clear_feature(zfsvfs->z_vfs, VFSFT_ACEMASKONACCESS);
vfs_clear_feature(zfsvfs->z_vfs, VFSFT_ACLONCREATE);
vfs_clear_feature(zfsvfs->z_vfs, VFSFT_ACCESS_FILTER);
vfs_clear_feature(zfsvfs->z_vfs, VFSFT_REPARSE);
}
}
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static int
zfs_domount(vfs_t *vfsp, char *osname)
{
uint64_t recordsize, fsid_guid;
int error = 0;
zfsvfs_t *zfsvfs;
ASSERT3P(vfsp, !=, NULL);
ASSERT3P(osname, !=, NULL);
error = zfsvfs_create(osname, vfsp->mnt_flag & MNT_RDONLY, &zfsvfs);
if (error)
return (error);
zfsvfs->z_vfs = vfsp;
if ((error = dsl_prop_get_integer(osname,
"recordsize", &recordsize, NULL)))
goto out;
zfsvfs->z_vfs->vfs_bsize = SPA_MINBLOCKSIZE;
zfsvfs->z_vfs->mnt_stat.f_iosize = recordsize;
vfsp->vfs_data = zfsvfs;
vfsp->mnt_flag |= MNT_LOCAL;
vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;
vfsp->mnt_kern_flag |= MNTK_EXTENDED_SHARED;
/*
* This can cause a loss of coherence between ARC and page cache
* on ZoF - unclear if the problem is in FreeBSD or ZoF
*/
vfsp->mnt_kern_flag |= MNTK_NO_IOPF; /* vn_io_fault can be used */
vfsp->mnt_kern_flag |= MNTK_NOMSYNC;
vfsp->mnt_kern_flag |= MNTK_VMSETSIZE_BUG;
#if defined(_KERNEL) && !defined(KMEM_DEBUG)
vfsp->mnt_kern_flag |= MNTK_FPLOOKUP;
#endif
/*
* The fsid is 64 bits, composed of an 8-bit fs type, which
* separates our fsid from any other filesystem types, and a
* 56-bit objset unique ID. The objset unique ID is unique to
* all objsets open on this system, provided by unique_create().
* The 8-bit fs type must be put in the low bits of fsid[1]
* because that's where other Solaris filesystems put it.
*/
fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
ASSERT3U((fsid_guid & ~((1ULL << 56) - 1)), ==, 0);
vfsp->vfs_fsid.val[0] = fsid_guid;
vfsp->vfs_fsid.val[1] = ((fsid_guid >> 32) << 8) |
(vfsp->mnt_vfc->vfc_typenum & 0xFF);
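/*
 * val[0] thus holds the low 32 bits of the objset ID, while val[1]
 * packs the remaining 24 bits (the guid is at most 56 bits wide, per
 * the assertion above) shifted up by 8, with the VFS type number in
 * the low byte.
 */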
/*
* Set features for file system.
*/
zfs_set_fuid_feature(zfsvfs);
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
} else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
}
vfs_set_feature(vfsp, VFSFT_ZEROCOPY_SUPPORTED);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acl_type_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
vfs_mountedfrom(vfsp, osname);
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
out:
if (error) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
} else {
atomic_inc_32(&zfs_active_fs_count);
}
return (error);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
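/*
 * Extract the pool component from a dataset name, e.g. "tank/home/user"
 * yields "tank". The destination buffer must hold at least MAXNAMELEN
 * bytes.
 */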
static int
getpoolname(const char *osname, char *poolname)
{
char *p;
p = strchr(osname, '/');
if (p == NULL) {
if (strlen(osname) >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strcpy(poolname, osname);
} else {
if (p - osname >= MAXNAMELEN)
return (ENAMETOOLONG);
(void) strncpy(poolname, osname, p - osname);
poolname[p - osname] = '\0';
}
return (0);
}
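/*
 * A leading '!' on the dataset name requests a checkpoint rewind on
 * import, e.g. "!tank/ROOT/default" sets *checkpointrewind and strips
 * the '!' in place.
 */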
static void
fetch_osname_options(char *name, bool *checkpointrewind)
{
if (name[0] == '!') {
*checkpointrewind = true;
memmove(name, name + 1, strlen(name));
} else {
*checkpointrewind = false;
}
}
/*ARGSUSED*/
static int
zfs_mount(vfs_t *vfsp)
{
kthread_t *td = curthread;
vnode_t *mvp = vfsp->mnt_vnodecovered;
cred_t *cr = td->td_ucred;
char *osname;
int error = 0;
int canwrite;
bool checkpointrewind;
if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
return (SET_ERROR(EINVAL));
/*
* If full-owner-access is enabled and delegated administration is
* turned on, we must set nosuid.
*/
if (zfs_super_owner &&
dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != ECANCELED) {
secpolicy_fs_mount_clearopts(cr, vfsp);
}
fetch_osname_options(osname, &checkpointrewind);
/*
* Check for mount privilege.
*
* If we don't have privilege then see if
* we have local permission to allow it.
*/
error = secpolicy_fs_mount(cr, mvp, vfsp);
if (error) {
if (dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != 0)
goto out;
if (!(vfsp->vfs_flag & MS_REMOUNT)) {
vattr_t vattr;
/*
* Make sure user is the owner of the mount point
* or has sufficient privileges.
*/
vattr.va_mask = AT_UID;
vn_lock(mvp, LK_SHARED | LK_RETRY);
if (VOP_GETATTR(mvp, &vattr, cr)) {
VOP_UNLOCK1(mvp);
goto out;
}
if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
VOP_UNLOCK1(mvp);
goto out;
}
VOP_UNLOCK1(mvp);
}
secpolicy_fs_mount_clearopts(cr, vfsp);
}
/*
* Refuse to mount a filesystem if we are in a local zone and the
* dataset is not visible.
*/
if (!INGLOBALZONE(curproc) &&
(!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
error = SET_ERROR(EPERM);
goto out;
}
vfsp->vfs_flag |= MNT_NFS4ACLS;
/*
* When doing a remount, we simply refresh our temporary properties
* according to those options set in the current VFS options.
*/
if (vfsp->vfs_flag & MS_REMOUNT) {
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* Refresh mount options with z_teardown_lock blocking I/O while
* the filesystem is in an inconsistent state.
* The lock also serializes this code with filesystem
* manipulations between entry to zfs_suspend_fs() and return
* from zfs_resume_fs().
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfs_unregister_callbacks(zfsvfs);
error = zfs_register_callbacks(vfsp);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
goto out;
}
/* Initial root mount: try hard to import the requested root pool. */
if ((vfsp->vfs_flag & MNT_ROOTFS) != 0 &&
(vfsp->vfs_flag & MNT_UPDATE) == 0) {
char pname[MAXNAMELEN];
error = getpoolname(osname, pname);
if (error == 0)
error = spa_import_rootpool(pname, checkpointrewind);
if (error)
goto out;
}
DROP_GIANT();
error = zfs_domount(vfsp, osname);
PICKUP_GIANT();
out:
return (error);
}
static int
zfs_statfs(vfs_t *vfsp, struct statfs *statp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
uint64_t refdbytes, availbytes, usedobjs, availobjs;
statp->f_version = STATFS_VERSION;
ZFS_ENTER(zfsvfs);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
/*
* The underlying storage pool actually uses multiple block sizes.
* We report the fragsize as the smallest block size we support,
* and we report our blocksize as the filesystem's maximum blocksize.
*/
statp->f_bsize = SPA_MINBLOCKSIZE;
statp->f_iosize = zfsvfs->z_vfs->mnt_stat.f_iosize;
/*
* The following report "total" blocks of various kinds in the
* file system, but reported in terms of f_frsize - the
* "fragment" size.
*/
statp->f_blocks = (refdbytes + availbytes) >> SPA_MINBLOCKSHIFT;
statp->f_bfree = availbytes / statp->f_bsize;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, statp->f_bfree);
statp->f_files = statp->f_ffree + usedobjs;
/*
* We're a zfs filesystem.
*/
strlcpy(statp->f_fstypename, "zfs",
sizeof (statp->f_fstypename));
strlcpy(statp->f_mntfromname, vfsp->mnt_stat.f_mntfromname,
sizeof (statp->f_mntfromname));
strlcpy(statp->f_mntonname, vfsp->mnt_stat.f_mntonname,
sizeof (statp->f_mntonname));
statp->f_namemax = MAXNAMELEN - 1;
ZFS_EXIT(zfsvfs);
return (0);
}
static int
zfs_root(vfs_t *vfsp, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *rootzp;
int error;
ZFS_ENTER(zfsvfs);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*vpp = ZTOV(rootzp);
ZFS_EXIT(zfsvfs);
if (error == 0) {
error = vn_lock(*vpp, flags);
if (error != 0) {
VN_RELE(*vpp);
*vpp = NULL;
}
}
return (error);
}
/*
* Teardown the zfsvfs::z_os.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
dsl_dir_t *dd;
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but zreles run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's vfsp as the parent
* filesystem and all of its snapshots have their vnode's
* v_vfsp set to the parent's filesystem's vfsp. Note,
* 'z_parent' is self referential for non-snapshots.
*/
#ifdef FREEBSD_NAMECACHE
#if __FreeBSD_version >= 1300117
cache_purgevfs(zfsvfs->z_parent->z_vfs);
#else
cache_purgevfs(zfsvfs->z_parent->z_vfs, true);
#endif
#endif
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
ZFS_TEARDOWN_INACTIVE_ENTER_WRITE(zfsvfs);
/*
* If we are not unmounting (ie: online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no vops active, and any new vops will
* fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl != NULL) {
zfs_znode_dmu_fini(zp);
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
/*
* If we are unmounting, set the unmounted flag and let new vops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other vops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data
*/
if (!zfs_is_readonly(zfsvfs))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
dmu_objset_evict_dbufs(zfsvfs->z_os);
dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
/*ARGSUSED*/
static int
zfs_umount(vfs_t *vfsp, int fflag)
{
kthread_t *td = curthread;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
objset_t *os;
cred_t *cr = td->td_ucred;
int ret;
ret = secpolicy_fs_unmount(cr, vfsp);
if (ret) {
if (dsl_deleg_access((char *)vfsp->vfs_resource,
ZFS_DELEG_PERM_MOUNT, cr))
return (ret);
}
/*
* Unmount any snapshots mounted under .zfs before unmounting the
* dataset itself.
*/
if (zfsvfs->z_ctldir != NULL) {
if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
return (ret);
}
if (fflag & MS_FORCE) {
/*
* Mark file system as unmounted before calling
* vflush(FORCECLOSE). This way we ensure no future vnops
* will be called and risk operating on DOOMED vnodes.
*/
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* Flush all the files.
*/
ret = vflush(vfsp, 0, (fflag & MS_FORCE) ? FORCECLOSE : 0, td);
if (ret != 0)
return (ret);
while (taskqueue_cancel(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task, NULL) != 0)
taskqueue_drain(zfsvfs_taskq->tq_queue,
&zfsvfs->z_unlinked_drain_task);
VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
os = zfsvfs->z_os;
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
/*
* We can now safely destroy the '.zfs' directory node.
*/
if (zfsvfs->z_ctldir != NULL)
zfsctl_destroy(zfsvfs);
zfs_freevfs(vfsp);
return (0);
}
static int
zfs_vget(vfs_t *vfsp, ino_t ino, int flags, vnode_t **vpp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
int err;
/*
* zfs_zget() can't operate on virtual entries like .zfs/ or
* .zfs/snapshot/ directories; that's why we return EOPNOTSUPP.
* This makes NFS switch to LOOKUP instead of using VGET.
*/
if (ino == ZFSCTL_INO_ROOT || ino == ZFSCTL_INO_SNAPDIR ||
(zfsvfs->z_shares_dir != 0 && ino == zfsvfs->z_shares_dir))
return (EOPNOTSUPP);
ZFS_ENTER(zfsvfs);
err = zfs_zget(zfsvfs, ino, &zp);
if (err == 0 && zp->z_unlinked) {
vrele(ZTOV(zp));
err = EINVAL;
}
if (err == 0)
*vpp = ZTOV(zp);
ZFS_EXIT(zfsvfs);
if (err == 0) {
err = vn_lock(*vpp, flags);
if (err != 0)
vrele(*vpp);
}
if (err != 0)
*vpp = NULL;
return (err);
}
static int
#if __FreeBSD_version >= 1300098
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, uint64_t *extflagsp,
struct ucred **credanonp, int *numsecflavors, int *secflavors)
#else
zfs_checkexp(vfs_t *vfsp, struct sockaddr *nam, int *extflagsp,
struct ucred **credanonp, int *numsecflavors, int **secflavors)
#endif
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
/*
* If this is a regular file system, vfsp is the same as
* zfsvfs->z_parent->z_vfs; but if it is a snapshot,
* zfsvfs->z_parent->z_vfs represents the parent file system,
* which we have to use here because only that file system
* has mnt_export configured.
*/
return (vfs_stdcheckexp(zfsvfs->z_parent->z_vfs, nam, extflagsp,
credanonp, numsecflavors, secflavors));
}
CTASSERT(SHORT_FID_LEN <= sizeof (struct fid));
CTASSERT(LONG_FID_LEN <= sizeof (struct fid));
static int
zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
{
struct componentname cn;
zfsvfs_t *zfsvfs = vfsp->vfs_data;
znode_t *zp;
vnode_t *dvp;
uint64_t object = 0;
uint64_t fid_gen = 0;
+ uint64_t setgen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*vpp = NULL;
ZFS_ENTER(zfsvfs);
/*
* On FreeBSD we can get either the snapshot's mount point or its parent
* file system's mount point, depending on whether the snapshot is
* already mounted.
*/
if (zfsvfs->z_parent == zfsvfs && fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
- uint64_t setgen = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
ZFS_EXIT(zfsvfs);
err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
if (err)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
}
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
+ if (fidp->fid_len == LONG_FID_LEN && (fid_gen > 1 || setgen != 0)) {
+ dprintf("snapdir fid: fid_gen (%llu) and setgen (%llu)\n",
+ (u_longlong_t)fid_gen, (u_longlong_t)setgen);
+ ZFS_EXIT(zfsvfs);
+ return (SET_ERROR(EINVAL));
+ }
+
/*
* A zero fid_gen means we are in .zfs or the .zfs/snapshot
* directory tree. If the object == zfsvfs->z_shares_dir, then
* we are in the .zfs/shares directory tree.
*/
if ((fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) ||
(zfsvfs->z_shares_dir != 0 && object == zfsvfs->z_shares_dir)) {
ZFS_EXIT(zfsvfs);
VERIFY0(zfsctl_root(zfsvfs, LK_SHARED, &dvp));
if (object == ZFSCTL_INO_SNAPDIR) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN | LOCKLEAF;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else if (object == zfsvfs->z_shares_dir) {
/*
* XXX This branch must not be taken,
* if it is, then the lookup below will
* explode.
*/
cn.cn_nameptr = "shares";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = LOOKUP;
cn.cn_flags = ISLASTCN;
cn.cn_lkflags = flags;
VERIFY0(VOP_LOOKUP(dvp, vpp, &cn));
vput(dvp);
} else {
*vpp = dvp;
}
return (err);
}
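/*
 * At this point 'i' is sizeof (zfid->zf_gen) from the loop above, so
 * the mask keeps only the generation bits that fit in the fid (e.g. a
 * 4-byte zf_gen gives a mask of 0xffffffff).
 */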
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", (u_longlong_t)object,
(u_longlong_t)fid_gen,
(u_longlong_t)gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
ZFS_EXIT(zfsvfs);
return (err);
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
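/*
 * Generation 0 is reserved for the .zfs control directory entries (see
 * the fid_gen check above), so a real znode whose masked generation is
 * 0 is treated as 1 for the comparison below.
 */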
if (zp_gen == 0)
zp_gen = 1;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n",
(u_longlong_t)zp_gen, (u_longlong_t)fid_gen);
vrele(ZTOV(zp));
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
*vpp = ZTOV(zp);
ZFS_EXIT(zfsvfs);
err = vn_lock(*vpp, flags);
if (err == 0)
vnode_create_vobject(*vpp, zp->z_size, curthread);
else
*vpp = NULL;
return (err);
}
/*
* Block out VOPs and close zfsvfs_t::z_os
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
zfs_set_fuid_feature(zfsvfs);
/*
* Attempt to re-establish all the active znodes with
* their dbufs. If a zfs_rezget() fails, then we'll let
* any potential callers discover that via ZFS_ENTER_VERIFY_VP
* when they try to use their znode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
(void) zfs_rezget(zp);
}
mutex_exit(&zfsvfs->z_znodes_lock);
bail:
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0) {
vfs_ref(zfsvfs->z_vfs);
(void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
}
}
return (err);
}
static void
zfs_freevfs(vfs_t *vfsp)
{
zfsvfs_t *zfsvfs = vfsp->vfs_data;
zfsvfs_free(zfsvfs);
atomic_dec_32(&zfs_active_fs_count);
}
#ifdef __i386__
static int desiredvnodes_backup;
#include <sys/vmmeter.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#endif
static void
zfs_vnodes_adjust(void)
{
#ifdef __i386__
int newdesiredvnodes;
desiredvnodes_backup = desiredvnodes;
/*
* We calculate newdesiredvnodes the same way it is done in
* vntblinit(). If it is equal to desiredvnodes, it means that
* it wasn't tuned by the administrator and we can tune it down.
*/
newdesiredvnodes = min(maxproc + vm_cnt.v_page_count / 4, 2 *
vm_kmem_size / (5 * (sizeof (struct vm_object) +
sizeof (struct vnode))));
if (newdesiredvnodes == desiredvnodes)
desiredvnodes = (3 * newdesiredvnodes) / 4;
#endif
}
static void
zfs_vnodes_adjust_back(void)
{
#ifdef __i386__
desiredvnodes = desiredvnodes_backup;
#endif
}
void
zfs_init(void)
{
printf("ZFS filesystem version: " ZPL_VERSION_STRING "\n");
/*
* Initialize .zfs directory structures
*/
zfsctl_init();
/*
* Initialize znode cache, vnode ops, etc...
*/
zfs_znode_init();
/*
* Reduce the number of vnodes. The stock number of vnodes is calculated
* with UFS inodes in mind. We reduce it here because it's too big for
* ZFS/i386.
*/
zfs_vnodes_adjust();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
zfsvfs_taskq = taskq_create("zfsvfs", 1, minclsyspri, 0, 0, 0);
}
void
zfs_fini(void)
{
taskq_destroy(zfsvfs_taskq);
zfsctl_fini();
zfs_znode_fini();
zfs_vnodes_adjust_back();
}
int
zfs_busy(void)
{
return (zfs_active_fs_count != 0);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_vfs, 0);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
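/*
 * SA is being enabled for the first time on this file system:
 * create the SA master node, link it into the master node ZAP
 * under ZFS_SA_ATTRS, and register the callback that upgrades
 * znodes to SA layouts.
 */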
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY0(sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %ju to %ju", (uintmax_t)zfsvfs->z_version,
(uintmax_t)newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Read a property stored within the master node.
*/
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
uint64_t *cached_copy = NULL;
/*
* Figure out where in the objset_t the cached copy would live, if it
* is available for the requested property.
*/
if (os != NULL) {
switch (prop) {
case ZFS_PROP_VERSION:
cached_copy = &os->os_version;
break;
case ZFS_PROP_NORMALIZE:
cached_copy = &os->os_normalization;
break;
case ZFS_PROP_UTF8ONLY:
cached_copy = &os->os_utf8only;
break;
case ZFS_PROP_CASE:
cached_copy = &os->os_casesensitivity;
break;
default:
break;
}
}
if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
*value = *cached_copy;
return (0);
}
/*
* If the property wasn't cached, look up the file system's value for
* the property. For the version property, we look up a slightly
* different string.
*/
const char *pname;
int error = ENOENT;
if (prop == ZFS_PROP_VERSION) {
pname = ZPL_VERSION_STR;
} else {
pname = zfs_prop_to_name(prop);
}
if (os != NULL) {
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
}
if (error == ENOENT) {
/* No value set, use the default value */
switch (prop) {
case ZFS_PROP_VERSION:
*value = ZPL_VERSION;
break;
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
*value = 0;
break;
case ZFS_PROP_CASE:
*value = ZFS_CASE_SENSITIVE;
break;
case ZFS_PROP_ACLTYPE:
*value = ZFS_ACLTYPE_NFSV4;
break;
default:
return (error);
}
error = 0;
}
/*
* If one of the methods for getting the property value above worked,
* copy it into the objset_t's cache.
*/
if (error == 0 && cached_copy != NULL) {
*cached_copy = *value;
}
return (error);
}
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT3U(dmu_objset_type(os), ==, DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_vfs != NULL &&
(zfvp->z_vfs->mnt_kern_flag & MNTK_UNMOUNT))
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
#ifdef _KERNEL
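/*
 * Rewrite f_mntfromname for every mount affected by renaming 'oldname'
 * to 'newname': an exact match is replaced outright, and any mount
 * under the old name (e.g. "tank/old/child" or "tank/old@snap" after
 * renaming "tank/old" to "tank/new") gets the new prefix spliced in.
 */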
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
char tmpbuf[MAXPATHLEN];
struct mount *mp;
char *fromname;
size_t oldlen;
oldlen = strlen(oldname);
mtx_lock(&mountlist_mtx);
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
fromname = mp->mnt_stat.f_mntfromname;
if (strcmp(fromname, oldname) == 0) {
(void) strlcpy(fromname, newname,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
if (strncmp(fromname, oldname, oldlen) == 0 &&
(fromname[oldlen] == '/' || fromname[oldlen] == '@')) {
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s%s",
newname, fromname + oldlen);
(void) strlcpy(fromname, tmpbuf,
sizeof (mp->mnt_stat.f_mntfromname));
continue;
}
}
mtx_unlock(&mountlist_mtx);
}
#endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 8e48f78b7311..7bcf80bf5a94 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1,6189 +1,6201 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/endian.h>
#include <sys/vm.h>
#include <sys/vnode.h>
#if __FreeBSD_version >= 1300102
#include <sys/smr.h>
#endif
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/atomic.h>
#include <sys/namei.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/kdb.h>
#include <sys/sysproto.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sched.h>
#include <sys/acl.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
#include <sys/zil.h>
#include <sys/zfs_vnops.h>
#include <vm/vm_object.h>
#include <sys/extattr.h>
#include <sys/priv.h>
#ifndef VN_OPEN_INVFS
#define VN_OPEN_INVFS 0x0
#endif
VFS_SMR_DECLARE;
#if __FreeBSD_version >= 1300047
#define vm_page_wire_lock(pp)
#define vm_page_wire_unlock(pp)
#else
#define vm_page_wire_lock(pp) vm_page_lock(pp)
#define vm_page_wire_unlock(pp) vm_page_unlock(pp)
#endif
#ifdef DEBUG_VFS_LOCKS
#define VNCHECKREF(vp) \
VNASSERT((vp)->v_holdcnt > 0 && (vp)->v_usecount > 0, vp, \
("%s: wrong ref counts", __func__));
#else
#define VNCHECKREF(vp)
#endif
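+/*
+ * Directory offset cookies are 64-bit values on newer FreeBSD (per the
+ * version check below) and ulong_t on older releases, so define a
+ * matching local type.
+ */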
+#if __FreeBSD_version >= 1400045
+typedef uint64_t cookie_t;
+#else
+typedef ulong_t cookie_t;
+#endif
+
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
* This is done with ZFS_ENTER(zfsvfs), which avoids races.
* A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
* must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) VN_RELE() should always be the last thing except for zil_commit()
* (if necessary) and ZFS_EXIT(). This is for 3 reasons:
* First, if it's the last reference, the vnode/znode
* can be freed, so the zp may point to freed memory. Second, the last
* reference will call zfs_zinactive(), which may induce a lot of work --
* pushing cached pages (which acquires range locks) and syncing out
* cached atime changes. Third, zfs_zinactive() may require a new tx,
* which could deadlock the system if you were already holding one.
* If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
* If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
* ZFS_ENTER(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lookup(&dl, ...) // lock directory entry (may VN_HOLD())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* zil_commit(zilog, foid); // synchronous when necessary
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
/* ARGSUSED */
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr)
{
znode_t *zp = VTOZ(*vpp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & FAPPEND) == 0)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
ZTOV(zp)->v_type == VREG &&
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
if (fs_vscan(*vpp, cr, 0) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
}
/* Keep a count of the synchronous opens in the znode */
if (flag & (FSYNC | FDSYNC))
atomic_inc_32(&zp->z_sync_cnt);
ZFS_EXIT(zfsvfs);
return (0);
}
/* ARGSUSED */
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* Decrement the synchronous opens in the znode */
if ((flag & (FSYNC | FDSYNC)) && (count == 1))
atomic_dec_32(&zp->z_sync_cnt);
if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
ZTOV(zp)->v_type == VREG &&
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
VERIFY0(fs_vscan(vp, cr, 1));
ZFS_EXIT(zfsvfs);
return (0);
}
/* ARGSUSED */
static int
zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
int *rvalp)
{
loff_t off;
int error;
switch (com) {
case _FIOFFS:
{
return (0);
/*
* The following two ioctls are used by bfu. Faking them
* out is necessary to avoid bfu errors.
*/
}
case _FIOGDIO:
case _FIOSDIO:
{
return (0);
}
case F_SEEK_DATA:
case F_SEEK_HOLE:
{
off = *(offset_t *)data;
/* offset parameter is in/out */
error = zfs_holey(VTOZ(vp), com, &off);
if (error)
return (error);
*(offset_t *)data = off;
return (0);
}
}
return (SET_ERROR(ENOTTY));
}
static vm_page_t
page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
{
vm_object_t obj;
vm_page_t pp;
int64_t end;
/*
* At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
* aligned boundaries, if the range is not aligned. As a result a
* DEV_BSIZE subrange with partially dirty data may get marked as clean.
* It may happen that all DEV_BSIZE subranges are marked clean and thus
* the whole page would be considered clean despite having some
* dirty data.
* For this reason we should shrink the range to DEV_BSIZE aligned
* boundaries before calling vm_page_clear_dirty.
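* For example, assuming DEV_BSIZE is 512: off = 100 and nbytes = 1000
* give end = rounddown2(1100, 512) = 1024 and off = roundup2(100, 512) =
* 512, shrinking nbytes to 512.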
*/
end = rounddown2(off + nbytes, DEV_BSIZE);
off = roundup2(off, DEV_BSIZE);
nbytes = end - off;
obj = vp->v_object;
zfs_vmobject_assert_wlocked_12(obj);
#if __FreeBSD_version < 1300050
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
vm_page_sbusy(pp);
} else if (pp != NULL) {
ASSERT(!pp->valid);
pp = NULL;
}
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
break;
}
#else
vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
VM_ALLOC_IGN_SBUSY);
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
#endif
return (pp);
}
static void
page_unbusy(vm_page_t pp)
{
vm_page_sunbusy(pp);
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(pp->object);
#else
vm_object_pip_subtract(pp->object, 1);
#endif
}
#if __FreeBSD_version > 1300051
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t m;
obj = vp->v_object;
vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
VM_ALLOC_NOBUSY);
return (m);
}
#else
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t pp;
obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_wire_lock(pp);
vm_page_hold(pp);
vm_page_wire_unlock(pp);
} else
pp = NULL;
break;
}
return (pp);
}
#endif
static void
page_unhold(vm_page_t pp)
{
vm_page_wire_lock(pp);
#if __FreeBSD_version >= 1300035
vm_page_unwire(pp, PQ_ACTIVE);
#else
vm_page_unhold(pp);
#endif
vm_page_wire_unlock(pp);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
vm_object_t obj;
struct sf_buf *sf;
vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300041
vm_object_pip_add(obj, 1);
#endif
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unbusy(pp);
}
len -= nbytes;
off = 0;
}
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(obj);
#else
vm_object_pip_wakeupn(obj, 0);
#endif
zfs_vmobject_wunlock_12(obj);
}
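/*
 * Worked example of the chunking loop above (assuming PAGESIZE is 4096;
 * the offsets are illustrative only):
 *
 *	start = 5000, len = 6000
 *	off   = 5000 & PAGEOFFSET = 904,  start &= PAGEMASK -> 4096
 *	pass 1: page at 4096, nbytes = imin(4096 - 904, 6000) = 3192
 *	pass 2: page at 8192, off = 0, nbytes = imin(4096, 2808) = 2808
 *
 * Each busied page is refreshed from the DMU for exactly the byte range
 * the caller just wrote, keeping the page cache and the dmu buffer in
 * sync.
 */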
/*
* Read with UIO_NOCOPY flag means that sendfile(2) requests
* ZFS to populate a range of page cache pages with data.
*
* NOTE: this function could be optimized to pre-allocate
* all pages in advance, drain exclusive busy on all of them,
* map them into a contiguous KVA region, and populate them
* in a single dmu_read() call.
*/
int
mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
vm_page_t pp;
int64_t start;
caddr_t va;
int len = nbytes;
int error = 0;
ASSERT3U(zfs_uio_segflg(uio), ==, UIO_NOCOPY);
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
zfs_vmobject_wlock_12(obj);
for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
if (vm_page_none_valid(pp)) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
bzero(va + bytes, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
if (error == 0) {
vm_page_valid(pp);
vm_page_activate(pp);
vm_page_do_sunbusy(pp);
} else {
zfs_vmobject_wlock(obj);
if (!vm_page_wired(pp) && pp->valid == 0 &&
vm_page_busy_tryupgrade(pp))
vm_page_free(pp);
else
vm_page_sunbusy(pp);
zfs_vmobject_wunlock(obj);
}
#else
vm_page_do_sunbusy(pp);
vm_page_lock(pp);
if (error) {
if (pp->wire_count == 0 && pp->valid == 0 &&
!vm_page_busied(pp))
vm_page_free(pp);
} else {
pp->valid = VM_PAGE_BITS_ALL;
vm_page_activate(pp);
}
vm_page_unlock(pp);
#endif
} else {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(pp);
}
if (error)
break;
zfs_uio_advance(uio, bytes);
len -= bytes;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
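/*
 * Hedged sketch of the userland call that drives the UIO_NOCOPY path
 * above: sendfile(2).  The descriptors and size are hypothetical; this
 * only illustrates how the page-cache population described above gets
 * exercised.
 */
#if 0	/* illustrative sketch only; not compiled */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_whole_file(int filefd, int sockfd, off_t filesize)
{
	off_t sbytes = 0;

	/*
	 * sendfile() populates the file's page cache pages (on ZFS via
	 * mappedread_sf()) and hands them to the socket without copying
	 * through userland.
	 */
	return (sendfile(filefd, sockfd, 0, (size_t)filesize, NULL,
	    &sbytes, 0));
}
#endif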
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* otherwise we fall back to the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
int off;
int error = 0;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
start = zfs_uio_offset(uio);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
if ((pp = page_hold(vp, start))) {
struct sf_buf *sf;
caddr_t va;
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = vn_io_fault_uiomove(va + off, bytes,
GET_UIO_STRUCT(uio));
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unhold(pp);
} else {
zfs_vmobject_wunlock_12(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
zfs_vmobject_wlock_12(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
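/*
 * Hedged userland sketch of the read-side coherence the function above
 * provides: a store through a MAP_SHARED mapping is visible to a
 * subsequent read(2) of the same range because the resident page is
 * preferred over the dmu buffer.  The path and sizes are hypothetical.
 */
#if 0	/* illustrative sketch only; not compiled */
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
mmap_then_read(const char *path)
{
	char buf[4];
	int fd = open(path, O_RDWR);
	char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, 0);

	memcpy(map, "zfs", 4);		/* dirty the mapped page */
	pread(fd, buf, 4, 0);		/* served from the resident page */
	munmap(map, 4096);
	close(fd);
	return (memcmp(buf, "zfs", 4));	/* 0: read(2) saw the mapped store */
}
#endif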
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
{
int error = 0;
ssize_t resid;
error = vn_rdwr(UIO_WRITE, ZTOV(zp), __DECONST(void *, data), len, pos,
UIO_SYSSPACE, IO_SYNC, kcred, NOCRED, &resid, curthread);
if (error) {
return (SET_ERROR(error));
} else if (presid == NULL) {
if (resid != 0) {
error = SET_ERROR(EIO);
}
} else {
*presid = resid;
}
return (error);
}
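/*
 * Minimal sketch of a hypothetical in-kernel caller of zfs_write_simple(),
 * derived only from the signature above; the helper name, buffer, and
 * offset are illustrative.
 */
#if 0	/* illustrative sketch only; not compiled */
static int
write_label(znode_t *zp, const char *label, size_t labellen)
{
	size_t resid;
	int error;

	/* Write synchronously at offset 0; resid reports unwritten bytes. */
	error = zfs_write_simple(zp, label, labellen, 0, &resid);
	if (error == 0 && resid != 0)
		error = SET_ERROR(EIO);
	return (error);
}
#endif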
void
zfs_zrele_async(znode_t *zp)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = ITOZSB(vp)->z_os;
VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
}
static int
zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
{
int error;
*vpp = arg;
error = vn_lock(*vpp, lkflags);
if (error != 0)
vrele(*vpp);
return (error);
}
static int
zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
{
znode_t *zdp = VTOZ(dvp);
zfsvfs_t *zfsvfs __unused = zdp->z_zfsvfs;
int error;
int ltype;
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(dvp, __func__);
if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
ASSERT3P(dvp, ==, vp);
vref(dvp);
ltype = lkflags & LK_TYPE_MASK;
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/*
* Relocking for the "." case could leave us with a
* reclaimed vnode.
*/
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
/*
* Note that in this case, dvp is the child vnode, and we
* are looking up the parent vnode - exactly reverse from
* normal operation. Unlocking dvp requires some rather
* tricky unlock/relock dance to prevent mp from being freed;
* use vn_vget_ino_gen() which takes care of all that.
*
* XXX Note that there is a time window when both vnodes are
* unlocked. It is possible, although highly unlikely, that
* during that window the parent-child relationship between
* the vnodes may change, for example, get reversed.
* In that case we would have a wrong lock order for the vnodes.
* All other filesystems seem to ignore this problem, so we
* do the same here.
* A potential solution could be implemented as follows:
* - using LK_NOWAIT when locking the second vnode and retrying
* if necessary
* - checking that the parent-child relationship still holds
* after locking both vnodes and retrying if it doesn't
*/
error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
return (error);
} else {
error = vn_lock(vp, lkflags);
if (error != 0)
vrele(vp);
return (error);
}
}
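/*
 * Hedged sketch of the "potential solution" outlined in the dot-dot
 * comment above: take the second lock with LK_NOWAIT and fall back to a
 * drop-relock-retry cycle on contention.  This only illustrates the
 * idea; the helper name is hypothetical and the parent-child re-check is
 * left as a comment.
 */
#if 0	/* illustrative sketch only; not compiled */
static int
lock_pair_checked(vnode_t *dvp, vnode_t *vp, int lkflags)
{
	int error;

	for (;;) {
		error = vn_lock(vp, lkflags | LK_NOWAIT);
		if (error == 0) {
			/*
			 * Both vnodes are locked; re-verify that dvp is
			 * still the child of vp (e.g. via SA_ZPL_PARENT)
			 * and retry from scratch if the relationship
			 * changed while the locks were dropped.
			 */
			break;
		}
		/* Drop dvp, take vp blocking, then retry the pair. */
		VOP_UNLOCK1(dvp);
		error = vn_lock(vp, lkflags);
		if (error != 0)
			return (error);
		VOP_UNLOCK1(vp);
		error = vn_lock(dvp, lkflags);
		if (error != 0)
			return (error);
	}
	return (error);
}
#endif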
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held vnode reference for it.
*
* IN: dvp - vnode of directory to search.
* nm - name of entry to lookup.
* pnp - full pathname to lookup [UNUSED].
* flags - LOOKUP_XATTR set if looking for an attribute.
* rdir - root directory vnode [UNUSED].
* cr - credentials of caller.
* ct - caller context
*
* OUT: vpp - vnode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
/* ARGSUSED */
static int
zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
struct componentname *cnp, int nameiop, cred_t *cr, int flags,
boolean_t cached)
{
znode_t *zdp = VTOZ(dvp);
znode_t *zp;
zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
#if __FreeBSD_version > 1300124
seqc_t dvp_seqc;
#endif
int error = 0;
/*
* Fast path lookup; however, we must skip the DNLC lookup
* for case-folding or normalizing lookups because the
* DNLC code only stores the passed-in name. This means
* creating 'a' and removing 'A' on a case-insensitive
* file system would work, but the DNLC still thinks 'a'
* exists and won't let you create it again on the next
* pass through the fast path.
*/
if (!(flags & LOOKUP_XATTR)) {
if (dvp->v_type != VDIR) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
}
DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp,
const char *, nm);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zdp);
#if __FreeBSD_version > 1300124
dvp_seqc = vn_seqc_read_notmodify(dvp);
#endif
*vpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* If the xattr property is off, refuse the lookup request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOPNOTSUPP));
}
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(VTOZ(dvp), &zp, cr, flags))) {
ZFS_EXIT(zfsvfs);
return (error);
}
*vpp = ZTOV(zp);
/*
* Do we have permission to get into attribute directory?
*/
error = zfs_zaccess(zp, ACE_EXECUTE, 0, B_FALSE, cr);
if (error) {
vrele(ZTOV(zp));
}
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Check accessibility of directory if we're not coming in via
* VOP_CACHEDLOOKUP.
*/
if (!cached) {
#ifdef NOEXECCHECK
if ((cnp->cn_flags & NOEXECCHECK) != 0) {
cnp->cn_flags &= ~NOEXECCHECK;
} else
#endif
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
/*
* First handle the special cases.
*/
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* If we are a snapshot mounted under .zfs, return
* the vp for the snapshot directory.
*/
if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
struct componentname cn;
vnode_t *zfsctl_vp;
int ltype;
ZFS_EXIT(zfsvfs);
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK1(dvp);
error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
&zfsctl_vp);
if (error == 0) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = cnp->cn_nameiop;
cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
cn.cn_lkflags = cnp->cn_lkflags;
error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
vput(zfsctl_vp);
}
vn_lock(dvp, ltype | LK_RETRY);
return (error);
}
}
if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
ZFS_EXIT(zfsvfs);
if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
return (error);
}
/*
* The loop retries the lookup if the parent-child relationship
* changes during the dot-dot locking complexities.
*/
for (;;) {
uint64_t parent;
error = zfs_dirlook(zdp, nm, &zp);
if (error == 0)
*vpp = ZTOV(zp);
ZFS_EXIT(zfsvfs);
if (error != 0)
break;
error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
if (error != 0) {
/*
* If we've got a locking error, then the vnode
* got reclaimed because of a force unmount.
* We never enter doomed vnodes into the name cache.
*/
*vpp = NULL;
return (error);
}
if ((cnp->cn_flags & ISDOTDOT) == 0)
break;
ZFS_ENTER(zfsvfs);
if (zdp->z_sa_hdl == NULL) {
error = SET_ERROR(EIO);
} else {
error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent));
}
if (error != 0) {
ZFS_EXIT(zfsvfs);
vput(ZTOV(zp));
break;
}
if (zp->z_id == parent) {
ZFS_EXIT(zfsvfs);
break;
}
vput(ZTOV(zp));
}
if (error != 0)
*vpp = NULL;
/* Translate errors and add SAVENAME when needed. */
if (cnp->cn_flags & ISLASTCN) {
switch (nameiop) {
case CREATE:
case RENAME:
if (error == ENOENT) {
error = EJUSTRETURN;
cnp->cn_flags |= SAVENAME;
break;
}
fallthrough;
case DELETE:
if (error == 0)
cnp->cn_flags |= SAVENAME;
break;
}
}
#if __FreeBSD_version > 1300124
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* FIXME: zfs_lookup_lock relocks vnodes and does nothing to
* handle races. In particular different callers may end up
* with different vnodes and will try to add conflicting
* entries to the namecache.
*
* While finding a different result may be acceptable in the face
* of concurrent modification, adding conflicting entries
* trips over an assert in the namecache.
*
* Ultimately let an entry through once everything settles.
*/
if (!vn_seqc_consistent(dvp, dvp_seqc)) {
cnp->cn_flags &= ~MAKEENTRY;
}
}
#endif
/* Insert name into cache (as non-existent) if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(dvp, NULL, cnp);
/* Insert name into cache if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == 0 && (cnp->cn_flags & MAKEENTRY)) {
if (!(cnp->cn_flags & ISLASTCN) ||
(nameiop != DELETE && nameiop != RENAME)) {
cache_enter(dvp, *vpp, cnp);
}
}
return (error);
}
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the vp of the created or trunc'd file.
*
* IN: dvp - vnode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - large file flag [UNUSED].
* ct - caller context
* vsecp - ACL to be set
*
* OUT: vpp - vnode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated if new entry created
* vp - ctime|mtime always, atime if new
*/
/* ARGSUSED */
int
zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
objset_t *os;
dmu_tx_t *tx;
int error;
ksid_t *ksid;
uid_t uid;
gid_t gid = crgetgid(cr);
uint64_t projid = ZFS_DEFAULT_PROJID;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype;
#ifdef DEBUG_VFS_LOCKS
vnode_t *dvp = ZTOV(dzp);
#endif
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
ksid = crgetsid(cr, KSID_OWNER);
if (ksid)
uid = ksid_getid(ksid);
else
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || (vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
*zpp = NULL;
if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
vap->va_mode &= ~S_ISVTX;
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
ZFS_EXIT(zfsvfs);
return (error);
}
ASSERT3P(zp, ==, NULL);
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) &&
(vap->va_type != VREG)) {
error = SET_ERROR(EINVAL);
goto out;
}
if ((error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
getnewvnode_reserve_();
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
ZFS_EXIT(zfsvfs);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
out:
VNCHECKREF(dvp);
if (error == 0) {
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dvp - vnode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime
* vp - ctime (if nlink > 0)
*/
/*ARGSUSED*/
static int
zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t xattr_obj;
uint64_t obj = 0;
dmu_tx_t *tx;
boolean_t unlinked;
uint64_t txtype;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zp = VTOZ(vp);
ZFS_VERIFY_ZP(zp);
zilog = zfsvfs->z_log;
xattr_obj = 0;
xzp = NULL;
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (vp->v_type == VDIR) {
error = SET_ERROR(EPERM);
goto out;
}
vnevent_remove(vp, dvp, name, ct);
obj = zp->z_id;
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
}
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the vnode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (xzp) {
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
zfs_unlinked_add(zp, tx);
vp->v_vflag |= VV_NOSYNC;
}
/* XXX check changes to linux vnops */
txtype = TX_REMOVE;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (xzp)
vrele(ZTOV(xzp));
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
static int
zfs_lookup_internal(znode_t *dzp, const char *name, vnode_t **vpp,
struct componentname *cnp, int nameiop)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
int error;
cnp->cn_nameptr = __DECONST(char *, name);
cnp->cn_namelen = strlen(name);
cnp->cn_nameiop = nameiop;
cnp->cn_flags = ISLASTCN | SAVENAME;
cnp->cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
cnp->cn_cred = kcred;
#if __FreeBSD_version < 1400037
cnp->cn_thread = curthread;
#endif
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay) {
struct vop_lookup_args a;
a.a_gen.a_desc = &vop_lookup_desc;
a.a_dvp = ZTOV(dzp);
a.a_vpp = vpp;
a.a_cnp = cnp;
error = vfs_cache_lookup(&a);
} else {
error = zfs_lookup(ZTOV(dzp), name, vpp, cnp, nameiop, kcred, 0,
B_FALSE);
}
#ifdef ZFS_DEBUG
if (error) {
printf("got error %d on name %s on op %d\n", error, name,
nameiop);
kdb_backtrace();
}
#endif
return (error);
}
int
zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags)
{
vnode_t *vp;
int error;
struct componentname cn;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_remove_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Create a new directory and insert it into dvp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dvp - vnode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
* vsecp - ACL to be set
*
* OUT: vpp - vnode of created directory.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
* vp - ctime|mtime|atime updated
*/
/*ARGSUSED*/
int
zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t txtype;
dmu_tx_t *tx;
int error;
ksid_t *ksid;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
ASSERT3U(vap->va_type, ==, VDIR);
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
ksid = crgetsid(cr, KSID_OWNER);
if (ksid)
uid = ksid_getid(ksid);
else
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
((vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
NULL, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST which can cause some applications
* to fail.
*/
*zpp = NULL;
if ((error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
ASSERT3P(zp, ==, NULL);
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/*
* Now put new name in parent dir.
*/
(void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (0);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
{
cache_purge(dvp);
cache_purge(vp);
}
#endif
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dvp - vnode of directory to remove from.
* name - name of directory to be removed.
* cwd - vnode of current working directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
/*ARGSUSED*/
static int
zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
ZFS_VERIFY_ZP(zp);
zilog = zfsvfs->z_log;
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
if (vp->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
}
vnevent_rmdir(vp, dvp, name, ct);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
zfs_log_remove(zilog, tx, txtype, dzp, name,
ZFS_NO_OBJECT, B_FALSE);
}
dmu_tx_commit(tx);
cache_vop_rmdir(dvp, vp);
out:
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
int
zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags)
{
struct componentname cn;
vnode_t *vp;
int error;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_rmdir_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Read as many directory entries as will fit into the provided
* buffer from the given directory cursor position (specified in
* the uio structure).
*
* IN: vp - vnode of directory to read.
* uio - structure supplying read location, range info,
* and return buffer.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* OUT: uio - updated offset and range, buffer filled.
* eofp - set to true if end-of-file detected.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*
* Note that the low 4 bits of the cookie returned by zap are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
/* ARGSUSED */
static int
zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
- int *ncookies, ulong_t **cookies)
+ int *ncookies, cookie_t **cookies)
{
znode_t *zp = VTOZ(vp);
iovec_t *iovp;
edirent_t *eodp;
dirent64_t *odp;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
caddr_t outbuf;
size_t bufsize;
zap_cursor_t zc;
zap_attribute_t zap;
uint_t bytes_wanted;
uint64_t offset; /* must be unsigned; checks for < 1 */
uint64_t parent;
int local_eof;
int outcount;
int error;
uint8_t prefetch;
boolean_t check_sysattrs;
uint8_t type;
int ncooks;
- ulong_t *cooks = NULL;
+ cookie_t *cooks = NULL;
int flags = 0;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* If we are not given an eof variable,
* use a local one.
*/
if (eofp == NULL)
eofp = &local_eof;
/*
* Check for valid iov_len.
*/
if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Quit if the directory has been removed (POSIX).
*/
if ((*eofp = zp->z_unlinked) != 0) {
ZFS_EXIT(zfsvfs);
return (0);
}
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Get space to change directory entries into fs independent format.
*/
iovp = GET_UIO_STRUCT(uio)->uio_iov;
bytes_wanted = iovp->iov_len;
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) {
bufsize = bytes_wanted;
outbuf = kmem_alloc(bufsize, KM_SLEEP);
odp = (struct dirent64 *)outbuf;
} else {
bufsize = bytes_wanted;
outbuf = NULL;
odp = (struct dirent64 *)iovp->iov_base;
}
eodp = (struct edirent *)odp;
if (ncookies != NULL) {
/*
* Minimum entry size is dirent size and 1 byte for a file name.
*/
ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) -
sizeof (((struct dirent *)NULL)->d_name) + 1);
- cooks = malloc(ncooks * sizeof (ulong_t), M_TEMP, M_WAITOK);
+ cooks = malloc(ncooks * sizeof (*cooks), M_TEMP, M_WAITOK);
*cookies = cooks;
*ncookies = ncooks;
}
/*
* If this VFS supports the system attribute view interface; and
* we're looking at an extended attribute directory; and we care
* about normalization conflicts on this vfs; then we must check
* for normalization conflicts with the sysattr name space.
*/
#ifdef TODO
check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
(vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
(flags & V_RDDIR_ENTFLAGS);
#else
check_sysattrs = 0;
#endif
/*
* Transform to file-system independent format
*/
outcount = 0;
while (outcount < bytes_wanted) {
ino64_t objnum;
ushort_t reclen;
off64_t *next = NULL;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if ((*eofp = (error == ENOENT)) != 0)
break;
else
goto update;
}
if (zap.za_integer_length != 8 ||
zap.za_num_integers != 1) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
/*
* MacOS X can extract the object type here such as:
* uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
*/
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
if (check_sysattrs && !zap.za_normalization_conflict) {
#ifdef TODO
zap.za_normalization_conflict =
xattr_sysattr_casechk(zap.za_name);
#else
panic("%s:%u: TODO", __func__, __LINE__);
#endif
}
}
if (flags & V_RDDIR_ACCFILTER) {
/*
* If we have no access at all, don't include
* this entry in the returned information
*/
znode_t *ezp;
if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
goto skip_entry;
if (!zfs_has_access(ezp, cr)) {
vrele(ZTOV(ezp));
goto skip_entry;
}
vrele(ZTOV(ezp));
}
if (flags & V_RDDIR_ENTFLAGS)
reclen = EDIRENT_RECLEN(strlen(zap.za_name));
else
reclen = DIRENT64_RECLEN(strlen(zap.za_name));
/*
* Will this entry fit in the buffer?
*/
if (outcount + reclen > bufsize) {
/*
* Did we manage to fit anything in the buffer?
*/
if (!outcount) {
error = SET_ERROR(EINVAL);
goto update;
}
break;
}
if (flags & V_RDDIR_ENTFLAGS) {
/*
* Add extended flag entry:
*/
eodp->ed_ino = objnum;
eodp->ed_reclen = reclen;
/* NOTE: ed_off is the offset for the *next* entry */
next = &(eodp->ed_off);
eodp->ed_eflags = zap.za_normalization_conflict ?
ED_CASE_CONFLICT : 0;
(void) strncpy(eodp->ed_name, zap.za_name,
EDIRENT_NAMELEN(reclen));
eodp = (edirent_t *)((intptr_t)eodp + reclen);
} else {
/*
* Add normal entry:
*/
odp->d_ino = objnum;
odp->d_reclen = reclen;
odp->d_namlen = strlen(zap.za_name);
/* NOTE: d_off is the offset for the *next* entry. */
next = &odp->d_off;
strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
odp->d_type = type;
dirent_terminate(odp);
odp = (dirent64_t *)((intptr_t)odp + reclen);
}
outcount += reclen;
ASSERT3S(outcount, <=, bufsize);
/* Prefetch znode */
if (prefetch)
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
skip_entry:
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
/* Fill the offset right after advancing the cursor. */
if (next != NULL)
*next = offset;
if (cooks != NULL) {
*cooks++ = offset;
ncooks--;
KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
}
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
/* Subtract unused cookies */
if (ncookies != NULL)
*ncookies -= ncooks;
if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) {
iovp->iov_base += outcount;
iovp->iov_len -= outcount;
zfs_uio_resid(uio) -= outcount;
} else if ((error =
zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) {
/*
* Reset the pointer.
*/
offset = zfs_uio_offset(uio);
}
update:
zap_cursor_fini(&zc);
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1)
kmem_free(outbuf, bufsize);
if (error == ENOENT)
error = 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_uio_setoffset(uio, offset);
ZFS_EXIT(zfsvfs);
if (error != 0 && cookies != NULL) {
free(*cookies, M_TEMP);
*cookies = NULL;
*ncookies = 0;
}
return (error);
}
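/*
 * Worked illustration of the cookie/offset scheme described in the
 * comment above zfs_readdir() (the directory contents are hypothetical).
 * For the filesystem root with the '.zfs' directory visible, the cookies
 * recorded for successive entries look like:
 *
 *	entry "."     emitted at offset 0, next cookie = 1
 *	entry ".."    emitted at offset 1, next cookie = 2
 *	entry ".zfs"  emitted at offset 2, next cookie = 3
 *	regular entries follow; their cookies are serialized ZAP cursor
 *	positions, whose low 4 bits are zero (per the note above), so the
 *	low values stay free for the special entries.
 *
 * A later call (e.g. from NFS or getdirentries(2)) can pass any of these
 * cookies back as the uio offset to resume iteration at that point.
 */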
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
*
* IN: vp - vnode of file.
* vap - va_mask identifies requested attributes.
* If AT_XVATTR set, then optional attrs are requested
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
*
* OUT: vap - attribute values.
*
* RETURN: 0 (always succeeds).
*/
/* ARGSUSED */
static int
zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error = 0;
uint32_t blksize;
u_longlong_t nblocks;
uint64_t mtime[2], ctime[2], crtime[2], rdev;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap = NULL;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[4];
int count = 0;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
if (vp->v_type == VBLK || vp->v_type == VCHR)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
* Also, if we are the owner don't bother, since owner should
* always be allowed to read basic attributes of file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
(vap->va_uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
/*
* Return all attributes. It's cheaper to provide the answer
* than to determine whether we were asked the question.
*/
vap->va_type = IFTOVT(zp->z_mode);
vap->va_mode = zp->z_mode & ~S_IFMT;
vn_fsid(vp, vap);
vap->va_nodeid = zp->z_id;
vap->va_nlink = zp->z_links;
if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp) &&
zp->z_links < ZFS_LINK_MAX)
vap->va_nlink++;
vap->va_size = zp->z_size;
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
- vap->va_seq = zp->z_seq;
+ vap->va_gen = zp->z_gen;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
/*
* Add in any requested optional attributes and the create time.
* Also set the corresponding bits in the returned attribute bitmap.
*/
if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
xoap->xoa_archive =
((zp->z_pflags & ZFS_ARCHIVE) != 0);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
xoap->xoa_readonly =
((zp->z_pflags & ZFS_READONLY) != 0);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
xoap->xoa_system =
((zp->z_pflags & ZFS_SYSTEM) != 0);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
xoap->xoa_hidden =
((zp->z_pflags & ZFS_HIDDEN) != 0);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
xoap->xoa_nounlink =
((zp->z_pflags & ZFS_NOUNLINK) != 0);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
xoap->xoa_immutable =
((zp->z_pflags & ZFS_IMMUTABLE) != 0);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
xoap->xoa_appendonly =
((zp->z_pflags & ZFS_APPENDONLY) != 0);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
xoap->xoa_nodump =
((zp->z_pflags & ZFS_NODUMP) != 0);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
xoap->xoa_opaque =
((zp->z_pflags & ZFS_OPAQUE) != 0);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
xoap->xoa_av_quarantined =
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
xoap->xoa_av_modified =
((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
vp->v_type == VREG) {
zfs_sa_get_scanstamp(zp, xvap);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
xoap->xoa_generation = zp->z_gen;
XVA_SET_RTN(xvap, XAT_GEN);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
xoap->xoa_offline =
((zp->z_pflags & ZFS_OFFLINE) != 0);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
xoap->xoa_sparse =
((zp->z_pflags & ZFS_SPARSE) != 0);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
xoap->xoa_projinherit =
((zp->z_pflags & ZFS_PROJINHERIT) != 0);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
xoap->xoa_projid = zp->z_projid;
XVA_SET_RTN(xvap, XAT_PROJID);
}
}
ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
ZFS_TIME_DECODE(&vap->va_mtime, mtime);
ZFS_TIME_DECODE(&vap->va_ctime, ctime);
ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
vap->va_blksize = blksize;
vap->va_bytes = nblocks << 9; /* nblocks * 512 */
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
vap->va_blksize = zfsvfs->z_max_blksz;
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If AT_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
* ct - caller context
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - ctime updated, mtime updated if size changed.
*/
/* ARGSUSED */
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
{
vnode_t *vp = ZTOV(zp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
uint64_t saved_mode;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
sa_bulk_attr_t bulk[7], xattr_bulk[7];
int count = 0, xattr_count = 0;
if (mask == 0)
return (0);
if (mask & AT_NOSET)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & AT_XVATTR))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (mask & AT_SIZE && vp->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EISDIR));
}
if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* If this is an xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
xva_init(&tmpxvattr);
/*
* For immutable files, only the immutable bit and atime can be altered.
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/*
* Note: ZFS_READONLY is handled in zfs_zaccess_common.
*/
/*
* Verify that the timestamps don't overflow 32 bits.
* ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times greater than 2039. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (AT_ATIME | AT_MTIME)) {
if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOVERFLOW));
}
}
if (xoap != NULL && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOVERFLOW));
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOPNOTSUPP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode)))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOPNOTSUPP));
}
}
attrzp = NULL;
aclp = NULL;
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
/*
* First validate permissions
*/
if (mask & AT_SIZE) {
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err) {
ZFS_EXIT(zfsvfs);
return (err);
}
}
if (mask & (AT_ATIME|AT_MTIME) ||
((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr);
}
if (mask & (AT_UID|AT_GID)) {
int idmask = (mask & (AT_UID|AT_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & AT_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & AT_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both AT_UID and AT_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
((idmask == AT_UID) && take_owner) ||
((idmask == AT_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
secpolicy_setid_clear(vap, vp, cr);
trim_mask = (mask & (AT_UID|AT_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & AT_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
* The bits will be restored prior to actually setting
* the attributes so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(&tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((vp->v_type != VREG &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
if (mask & AT_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(vp, vap,
&oldva, cr);
if (err) {
ZFS_EXIT(zfsvfs);
return (err);
}
trim_mask |= AT_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then take ownership
* has been granted or write_acl is present and the user
* has the ability to modify the mode. In that case remove
* UID|GID and/or MODE from the mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
if (trim_mask & AT_MODE) {
/*
* Save the mode, as secpolicy_vnode_setattr()
* will overwrite it with ova.va_mode.
*/
saved_mode = vap->va_mode;
}
}
err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err) {
ZFS_EXIT(zfsvfs);
return (err);
}
if (trim_mask) {
vap->va_mask |= saved_mask;
if (trim_mask & AT_MODE) {
/*
* Recover the mode after
* secpolicy_vnode_setattr().
*/
vap->va_mode = saved_mode;
}
}
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
mask = vap->va_mask;
if ((mask & (AT_UID | AT_GID)) || projid != ZFS_INVALID_PROJID) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
if (err == 0) {
err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
if (err != 0)
vrele(ZTOV(attrzp));
}
if (err)
goto out2;
}
if (mask & AT_UID) {
new_uid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_uid != zp->z_uid &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_uid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & AT_GID) {
new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &fuidp);
if (new_gid != zp->z_gid &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_gid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
tx = dmu_tx_create(os);
if (mask & AT_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = SET_ERROR(EPERM);
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & AT_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
* For an existing object that was upgraded from an old system,
* the on-disk layout has no slot for the project ID attribute.
* But the quota accounting logic needs to access related slots by
* offset directly, so we need to adjust the old object's layout
* so that the project ID lands at a unified and fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&zp->z_acl_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&attrzp->z_acl_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (AT_UID|AT_GID)) {
if (mask & AT_UID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
zp->z_uid = new_uid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
attrzp->z_uid = new_uid;
}
}
if (mask & AT_GID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
zp->z_gid = new_gid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
attrzp->z_gid = new_gid;
}
}
if (!(mask & AT_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT0(err);
if (attrzp) {
vn_seqc_write_begin(ZTOV(attrzp));
err = zfs_acl_chown_setattr(attrzp);
vn_seqc_write_end(ZTOV(attrzp));
ASSERT0(err);
}
}
if (mask & AT_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if (mask & AT_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&zp->z_atime, sizeof (zp->z_atime));
}
if (mask & AT_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
if (mask & AT_SIZE && !(mask & AT_MTIME)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
} else if (mask != 0) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime);
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
mtime, ctime);
}
}
/*
* Do this after setting timestamps to prevent the timestamp
* update from toggling the bit.
*/
if (xoap && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
xoap->xoa_createtime = vap->va_birthtime;
/*
* Restore the trimmed-off masks
* so that the return masks can be set for the caller.
*/
if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT3S(vp->v_type, ==, VREG);
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&attrzp->z_acl_lock);
}
out:
if (err == 0 && attrzp) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT0(err2);
}
if (attrzp)
vput(ZTOV(attrzp));
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
} else {
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (err);
}
/*
* Look up the directory entries corresponding to the source and target
* directory/name pairs.
*/
static int
zfs_rename_relock_lookup(znode_t *sdzp, const struct componentname *scnp,
znode_t **szpp, znode_t *tdzp, const struct componentname *tcnp,
znode_t **tzpp)
{
zfsvfs_t *zfsvfs;
znode_t *szp, *tzp;
int error;
/*
Before using sdzp and tdzp we must ensure that they are live.
As a porting legacy from illumos we have two things to worry
about. One is typical for FreeBSD: the vnode must not be
reclaimed (doomed). The other is that the znode must be live.
The current code can invalidate the znode without acquiring the
corresponding vnode lock if the object represented by the znode
and vnode is no longer valid after a rollback or receive operation.
The z_teardown_lock hidden behind ZFS_ENTER and ZFS_EXIT is the lock
that protects the znodes from that invalidation.
*/
zfsvfs = sdzp->z_zfsvfs;
ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(sdzp);
ZFS_VERIFY_ZP(tdzp);
/*
* Re-resolve svp to be certain it still exists and fetch the
* correct vnode.
*/
error = zfs_dirent_lookup(sdzp, scnp->cn_nameptr, &szp, ZEXISTS);
if (error != 0) {
/* Source entry invalid or not there. */
if ((scnp->cn_flags & ISDOTDOT) != 0 ||
(scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
error = SET_ERROR(EINVAL);
goto out;
}
*szpp = szp;
/*
Re-resolve tvp; if it disappeared we just carry on.
*/
error = zfs_dirent_lookup(tdzp, tcnp->cn_nameptr, &tzp, 0);
if (error != 0) {
vrele(ZTOV(szp));
if ((tcnp->cn_flags & ISDOTDOT) != 0)
error = SET_ERROR(EINVAL);
goto out;
}
*tzpp = tzp;
out:
ZFS_EXIT(zfsvfs);
return (error);
}
/*
We acquire all but the sdvp lock using non-blocking acquisitions.
If we fail to acquire any lock in the path we will drop all held
locks, acquire the new lock in a blocking fashion, and then release
it and restart the rename. This acquire/release step ensures that
we do not spin on a lock waiting for its release. On error we
release all vnode locks and decrement references the way
tmpfs_rename() would do.
*/
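/*
On success zfs_rename_relock() returns with sdvp, tdvp, *svpp and,
when a target exists, *tvpp all exclusively locked; on failure no
vnode locks are held, which is what zfs_do_rename() below relies on.
*/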
static int
zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
struct vnode *tdvp, struct vnode **tvpp,
const struct componentname *scnp, const struct componentname *tcnp)
{
struct vnode *nvp, *svp, *tvp;
znode_t *sdzp, *tdzp, *szp, *tzp;
int error;
VOP_UNLOCK1(tdvp);
if (*tvpp != NULL && *tvpp != tdvp)
VOP_UNLOCK1(*tvpp);
relock:
error = vn_lock(sdvp, LK_EXCLUSIVE);
if (error)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
if (error != EBUSY)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE);
if (error)
goto out;
VOP_UNLOCK1(tdvp);
goto relock;
}
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
error = zfs_rename_relock_lookup(sdzp, scnp, &szp, tdzp, tcnp, &tzp);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
goto out;
}
svp = ZTOV(szp);
tvp = tzp != NULL ? ZTOV(tzp) : NULL;
/*
Now try to acquire locks on svp and tvp.
*/
nvp = svp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
if (tvp != NULL)
vrele(tvp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
VOP_UNLOCK1(nvp);
/*
* Concurrent rename race.
* XXX ?
*/
if (nvp == tdvp) {
vrele(nvp);
error = SET_ERROR(EINVAL);
goto out;
}
vrele(*svpp);
*svpp = nvp;
goto relock;
}
vrele(*svpp);
*svpp = nvp;
if (*tvpp != NULL)
vrele(*tvpp);
*tvpp = NULL;
if (tvp != NULL) {
nvp = tvp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK1(*svpp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
vput(nvp);
goto relock;
}
*tvpp = nvp;
}
return (0);
out:
return (error);
}
/*
Note that we must use VRELE_ASYNC in this function as it walks
up the directory tree and vrele may need to acquire an exclusive
lock if the last reference to a vnode is dropped.
*/
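/*
The loop below walks from tdzp towards the root via the SA parent
attribute to verify that the target directory is not a descendant
of the source znode, i.e. that a directory is never moved into its
own subtree.
*/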
static int
zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
{
zfsvfs_t *zfsvfs;
znode_t *zp, *zp1;
uint64_t parent;
int error;
zfsvfs = tdzp->z_zfsvfs;
if (tdzp == szp)
return (SET_ERROR(EINVAL));
if (tdzp == sdzp)
return (0);
if (tdzp->z_id == zfsvfs->z_root)
return (0);
zp = tdzp;
for (;;) {
ASSERT(!zp->z_unlinked);
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
break;
if (parent == szp->z_id) {
error = SET_ERROR(EINVAL);
break;
}
if (parent == zfsvfs->z_root)
break;
if (parent == sdzp->z_id)
break;
error = zfs_zget(zfsvfs, parent, &zp1);
if (error != 0)
break;
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)));
zp = zp1;
}
if (error == ENOTDIR)
panic("checkpath: .. not a directory\n");
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
return (error);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
{
cache_purge(fvp);
if (tvp != NULL)
cache_purge(tvp);
cache_purge_negative(tdvp);
}
#endif
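/*
Newer kernels are expected to provide cache_vop_rename() natively;
the helper above only approximates it for older __FreeBSD_version
values by purging the affected namecache entries.
*/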
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr);
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdvp - Source directory containing the "old entry".
* scnp - Old entry name.
* tdvp - Target directory to contain the "new entry".
* tcnp - New entry name.
* cr - credentials of caller.
* INOUT: svpp - Source file
* tvpp - Target file, may point to NULL initially
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdvp,tdvp - ctime|mtime updated
*/
/*ARGSUSED*/
static int
zfs_do_rename(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr)
{
int error;
ASSERT_VOP_ELOCKED(tdvp, __func__);
if (*tvpp != NULL)
ASSERT_VOP_ELOCKED(*tvpp, __func__);
/* Reject renames across filesystems. */
if ((*svpp)->v_mount != tdvp->v_mount ||
((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
error = SET_ERROR(EXDEV);
goto out;
}
if (zfsctl_is_node(tdvp)) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Lock all four vnodes to ensure safety and semantics of renaming.
*/
error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
if (error != 0) {
/* no vnodes are locked in the case of error here */
return (error);
}
error = zfs_do_rename_impl(sdvp, svpp, scnp, tdvp, tvpp, tcnp, cr);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(*svpp);
out:
if (*tvpp != NULL)
VOP_UNLOCK1(*tvpp);
if (tdvp != *tvpp)
VOP_UNLOCK1(tdvp);
return (error);
}
static int
zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs;
zilog_t *zilog;
znode_t *tdzp, *sdzp, *tzp, *szp;
const char *snm = scnp->cn_nameptr;
const char *tnm = tcnp->cn_nameptr;
int error;
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
zfsvfs = tdzp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(tdzp);
ZFS_VERIFY_ZP(sdzp);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
error = SET_ERROR(EILSEQ);
goto out;
}
/* If source and target are the same file, there is nothing to do. */
if ((*svpp) == (*tvpp)) {
error = 0;
goto out;
}
if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
(*tvpp)->v_mountedhere != NULL)) {
error = SET_ERROR(EXDEV);
goto out;
}
szp = VTOZ(*svpp);
ZFS_VERIFY_ZP(szp);
tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
if (tzp != NULL)
ZFS_VERIFY_ZP(tzp);
/*
This is to prevent the creation of links into attribute space
by renaming a linked file into/out of an attribute directory.
See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
If we are using project inheritance, meaning the directory has
ZFS_PROJINHERIT set, then its descendant directories will inherit
not only the project ID but also the ZFS_PROJINHERIT flag. In that
case, we only allow renames into our tree when the project
IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto out;
if ((*svpp)->v_type == VDIR) {
/*
* Avoid ".", "..", and aliases of "." for obvious reasons.
*/
if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
sdzp == szp ||
(scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
error = EINVAL;
goto out;
}
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_check(szp, sdzp, tdzp)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if ((*svpp)->v_type == VDIR) {
if ((*tvpp)->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
} else {
cache_purge(tdvp);
if (sdvp != tdvp)
cache_purge(sdvp);
}
} else {
if ((*tvpp)->v_type == VDIR) {
error = SET_ERROR(EISDIR);
goto out;
}
}
}
vn_seqc_write_begin(*svpp);
vn_seqc_write_begin(sdvp);
if (*tvpp != NULL)
vn_seqc_write_begin(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_begin(tdvp);
vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
if (tzp)
vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
/*
Notify the target directory if it is not the same
as the source directory.
*/
if (tdvp != sdvp) {
vnevent_rename_dest_dir(tdvp, ct);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out_seq;
}
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
if (error == 0) {
error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
snm, tdzp, tnm, szp);
/*
* Update path information for the target vnode
*/
vn_renamepath(tdvp, *svpp, tnm, strlen(tnm));
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY0(zfs_link_destroy(tdzp, tnm, szp, tx,
ZRENAMING, NULL));
}
}
if (error == 0) {
cache_vop_rename(sdvp, *svpp, tdvp, *tvpp, scnp, tcnp);
}
}
dmu_tx_commit(tx);
out_seq:
vn_seqc_write_end(*svpp);
vn_seqc_write_end(sdvp);
if (*tvpp != NULL)
vn_seqc_write_end(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_end(tdvp);
out:
if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
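/*
Entry point used with znodes and path names (e.g. during ZIL
replay): resolve the source and target names to vnodes with
zfs_lookup_internal() and hand the locked vnodes off to
zfs_do_rename() above.
*/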
int
zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
cred_t *cr, int flags)
{
struct componentname scn, tcn;
vnode_t *sdvp, *tdvp;
vnode_t *svp, *tvp;
int error;
svp = tvp = NULL;
sdvp = ZTOV(sdzp);
tdvp = ZTOV(tdzp);
error = zfs_lookup_internal(sdzp, sname, &svp, &scn, DELETE);
if (sdzp->z_zfsvfs->z_replay == B_FALSE)
VOP_UNLOCK1(sdvp);
if (error != 0)
goto fail;
VOP_UNLOCK1(svp);
vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
error = zfs_lookup_internal(tdzp, tname, &tvp, &tcn, RENAME);
if (error == EJUSTRETURN)
tvp = NULL;
else if (error != 0) {
VOP_UNLOCK1(tdvp);
goto fail;
}
error = zfs_do_rename(sdvp, &svp, &scn, tdvp, &tvp, &tcn, cr);
fail:
if (svp != NULL)
vrele(svp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
/*
Insert the indicated symbolic link entry into the directory.

IN: dzp - Directory to contain the new symbolic link.
name - Name of the new symlink entry.
vap - Attributes of the new entry.
link - Target path of the symbolic link.
cr - credentials of caller.
flags - case flags

OUT: zpp - znode of the created symlink.

RETURN: 0 on success, error code on failure.

Timestamps:
dzp - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
const char *link, znode_t **zpp, cred_t *cr, int flags)
{
znode_t *zp;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
ASSERT3S(vap->va_type, ==, VLNK);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (len > MAXPATHLEN) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids,
0 /* projid */)) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
ZFS_EXIT(zfsvfs);
return (error);
}
/*
Create a new object for the symlink.
For version 4 ZPL datasets the symlink will be an SA attribute.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
__DECONST(void *, link), len, tx);
else
zfs_sa_symlink(zp, __DECONST(char *, link), len, tx);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
*zpp = zp;
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by vp.
*
* IN: vp - vnode of symbolic link.
* uio - structure to contain the link path.
* cr - credentials of caller.
* ct - caller context
*
* OUT: uio - structure containing the link path.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
/* ARGSUSED */
static int
zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
Insert a new entry into directory tdzp referencing szp.

IN: tdzp - Directory to contain the new entry.
szp - znode of the new entry.
name - name of the new entry.
cr - credentials of caller.

RETURN: 0 on success, error code on failure.

Timestamps:
tdzp - ctime|mtime updated
szp - ctime updated
*/
/* ARGSUSED */
int
zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
int flags)
{
znode_t *tzp;
zfsvfs_t *zfsvfs = tdzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
uint64_t parent;
uid_t owner;
ASSERT3S(ZTOV(tdzp)->v_type, ==, VDIR);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(tdzp);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (ZTOV(szp)->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
ZFS_VERIFY_ZP(szp);
/*
If we are using project inheritance, meaning the directory has
ZFS_PROJINHERIT set, then its descendant directories will inherit
not only the project ID but also the ZFS_PROJINHERIT flag. In that
case, we only allow hard link creation in our tree when the
project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
if (szp->z_pflags & (ZFS_APPENDONLY |
ZFS_IMMUTABLE | ZFS_READONLY)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(ZTOV(szp), cr) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(tdzp, name, &tzp, ZNEW);
if (error) {
ZFS_EXIT(zfsvfs);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
error = zfs_link_create(tdzp, name, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
dmu_tx_commit(tx);
if (error == 0) {
vnevent_link(ZTOV(szp), ct);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
Free or allocate space in a file. Currently, this function only
supports the `F_FREESP' command, although this command is somewhat
misnamed, as its functionality includes the ability to allocate as
well as free space.

IN: zp - znode of the file to free data in.
cmd - action to take (only F_FREESP supported).
bfp - section of file to free/alloc.
flag - current file open mode flags.
offset - current file offset.
cr - credentials of caller.

RETURN: 0 on success, error code on failure.

Timestamps:
zp - ctime|mtime updated
*/
/* ARGSUSED */
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (cmd != F_FREESP) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
static void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL) {
/*
* The fs has been unmounted, or we did a
* suspend/resume and this file no longer exists.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
if (zp->z_unlinked) {
/*
* Fast path to recycle a vnode of a removed file.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
if (zp->z_atime_dirty && zp->z_unlinked == 0) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&zp->z_atime, sizeof (zp->z_atime), tx);
zp->z_atime_dirty = 0;
dmu_tx_commit(tx);
}
}
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
}
CTASSERT(sizeof (struct zfid_short) <= sizeof (struct fid));
CTASSERT(sizeof (struct zfid_long) <= sizeof (struct fid));
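/*
File handle layout produced below: a short fid encodes the object
number and generation; the long form is used when this file system
is not its own parent (e.g. a snapshot mounted under .zfs) and
additionally encodes the objset id plus a currently unused objset
generation.
*/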
/*ARGSUSED*/
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
gen = (uint32_t)gen64;
size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
fidp->fid_len = size;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
if (size == LONG_FID_LEN) {
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
zfid_long_t *zlfid;
zlfid = (zfid_long_t *)fidp;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
/* XXX - this should be the generation number for the objset */
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
}
ZFS_EXIT(zfsvfs);
return (0);
}
static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
caller_context_t *ct)
{
znode_t *zp;
zfsvfs_t *zfsvfs;
switch (cmd) {
case _PC_LINK_MAX:
*valp = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*valp = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*valp = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
#if 0 /* POSIX ACLs are not implemented for ZFS on FreeBSD yet. */
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
*valp = zfsvfs->z_acl_type == ZFS_ACLTYPE_POSIX ? 1 : 0;
ZFS_EXIT(zfsvfs);
#else
*valp = 0;
#endif
return (0);
case _PC_ACL_NFS4:
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
*valp = zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4 ? 1 : 0;
ZFS_EXIT(zfsvfs);
return (0);
case _PC_ACL_PATH_MAX:
*valp = ACL_MAX_ENTRIES;
return (0);
default:
return (EOPNOTSUPP);
}
}
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
vm_object_t object;
off_t start, end, obj_size;
uint_t blksz;
int pgsin_b, pgsin_a;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
start = IDX_TO_OFF(ma[0]->pindex);
end = IDX_TO_OFF(ma[count - 1]->pindex + 1);
/*
* Lock a range covering all required and optional pages.
* Note that we need to handle the case of the block size growing.
*/
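/*
If the non-blocking rangelock attempt fails we give up on any
readahead/readbehind and read just the requested pages without the
range lock; if the block size changed while we were acquiring the
lock, drop it and retry with the new size.
*/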
for (;;) {
blksz = zp->z_blksz;
lr = zfs_rangelock_tryenter(&zp->z_rangelock,
rounddown(start, blksz),
roundup(end, blksz) - rounddown(start, blksz), RL_READER);
if (lr == NULL) {
if (rahead != NULL) {
*rahead = 0;
rahead = NULL;
}
if (rbehind != NULL) {
*rbehind = 0;
rbehind = NULL;
}
break;
}
if (blksz == zp->z_blksz)
break;
zfs_rangelock_exit(lr);
}
object = ma[0]->object;
zfs_vmobject_wlock(object);
obj_size = object->un_pager.vnp.vnp_size;
zfs_vmobject_wunlock(object);
if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
if (lr != NULL)
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (zfs_vm_pagerret_bad);
}
pgsin_b = 0;
if (rbehind != NULL) {
pgsin_b = OFF_TO_IDX(start - rounddown(start, blksz));
pgsin_b = MIN(*rbehind, pgsin_b);
}
pgsin_a = 0;
if (rahead != NULL) {
pgsin_a = OFF_TO_IDX(roundup(end, blksz) - end);
if (end + IDX_TO_OFF(pgsin_a) >= obj_size)
pgsin_a = OFF_TO_IDX(round_page(obj_size) - end);
pgsin_a = MIN(*rahead, pgsin_a);
}
/*
* NB: we need to pass the exact byte size of the data that we expect
* to read after accounting for the file size. This is required because
* ZFS will panic if we request DMU to read beyond the end of the last
* allocated block.
*/
error = dmu_read_pages(zfsvfs->z_os, zp->z_id, ma, count, &pgsin_b,
&pgsin_a, MIN(end, obj_size) - (end - PAGE_SIZE));
if (lr != NULL)
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
+
+ dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, count*PAGE_SIZE);
+
ZFS_EXIT(zfsvfs);
if (error != 0)
return (zfs_vm_pagerret_error);
VM_CNT_INC(v_vnodein);
VM_CNT_ADD(v_vnodepgsin, count + pgsin_b + pgsin_a);
if (rbehind != NULL)
*rbehind = pgsin_b;
if (rahead != NULL)
*rahead = pgsin_a;
return (zfs_vm_pagerret_ok);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int *a_rbehind;
int *a_rahead;
};
#endif
static int
zfs_freebsd_getpages(struct vop_getpages_args *ap)
{
return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
ap->a_rahead));
}
static int
zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
int *rtvals)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
dmu_tx_t *tx;
struct sf_buf *sf;
vm_object_t object;
vm_page_t m;
caddr_t va;
size_t tocopy;
size_t lo_len;
vm_ooffset_t lo_off;
vm_ooffset_t off;
uint_t blksz;
int ncount;
int pcount;
int err;
int i;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
object = vp->v_object;
pcount = btoc(len);
ncount = pcount;
KASSERT(ma[0]->object == object, ("mismatching object"));
KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
for (i = 0; i < pcount; i++)
rtvals[i] = zfs_vm_pagerret_error;
off = IDX_TO_OFF(ma[0]->pindex);
blksz = zp->z_blksz;
lo_off = rounddown(off, blksz);
lo_len = roundup(len + (off - lo_off), blksz);
lr = zfs_rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
zfs_vmobject_wlock(object);
if (len + off > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > off) {
int pgoff;
len = object->un_pager.vnp.vnp_size - off;
ncount = btoc(len);
if ((pgoff = (int)len & PAGE_MASK) != 0) {
/*
* If the object is locked and the following
* conditions hold, then the page's dirty
* field cannot be concurrently changed by a
* pmap operation.
*/
m = ma[ncount - 1];
vm_page_assert_sbusied(m);
KASSERT(!pmap_page_is_write_mapped(m),
("zfs_putpages: page %p is not read-only",
m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);
}
} else {
len = 0;
ncount = 0;
}
if (ncount < pcount) {
for (i = ncount; i < pcount; i++) {
rtvals[i] = zfs_vm_pagerret_bad;
}
}
}
zfs_vmobject_wunlock(object);
if (ncount == 0)
goto out;
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, zp->z_uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, zp->z_gid) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
goto out;
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, off, len);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
goto out;
}
if (zp->z_blksz < PAGE_SIZE) {
for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
va = zfs_map_page(ma[i], &sf);
dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
zfs_unmap_page(sf);
}
} else {
err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
}
if (err == 0) {
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT0(err);
/*
* XXX we should be passing a callback to undirty
* but that would make the locking messier
*/
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off,
len, 0, NULL, NULL);
zfs_vmobject_wlock(object);
for (i = 0; i < ncount; i++) {
rtvals[i] = zfs_vm_pagerret_ok;
vm_page_undirty(ma[i]);
}
zfs_vmobject_wunlock(object);
VM_CNT_INC(v_vnodeout);
VM_CNT_ADD(v_vnodepgsout, ncount);
}
dmu_tx_commit(tx);
out:
zfs_rangelock_exit(lr);
if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zfsvfs->z_log, zp->z_id);
+
+ dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, len);
+
ZFS_EXIT(zfsvfs);
return (rtvals[0]);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_putpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int a_sync;
int *a_rtvals;
};
#endif
static int
zfs_freebsd_putpages(struct vop_putpages_args *ap)
{
return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
ap->a_rtvals));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_bmap_args {
struct vnode *a_vp;
daddr_t a_bn;
struct bufobj **a_bop;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
};
#endif
static int
zfs_freebsd_bmap(struct vop_bmap_args *ap)
{
if (ap->a_bop != NULL)
*ap->a_bop = &ap->a_vp->v_bufobj;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_open_args {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_open(struct vop_open_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
int error;
error = zfs_open(&vp, ap->a_mode, ap->a_cred);
if (error == 0)
vnode_create_vobject(vp, zp->z_size, ap->a_td);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_close_args {
struct vnode *a_vp;
int a_fflag;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_close(struct vop_close_args *ap)
{
return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_ioctl_args {
struct vnode *a_vp;
ulong_t a_command;
caddr_t a_data;
int a_fflag;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_ioctl(struct vop_ioctl_args *ap)
{
return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
ap->a_fflag, ap->a_cred, NULL));
}
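/*
Translate FreeBSD VOP ioflag bits into the FAPPEND/FNONBLOCK/FSYNC
style flags expected by the common zfs_read()/zfs_write() code.
*/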
static int
ioflags(int ioflags)
{
int flags = 0;
if (ioflags & IO_APPEND)
flags |= FAPPEND;
if (ioflags & IO_NDELAY)
flags |= FNONBLOCK;
if (ioflags & IO_SYNC)
flags |= (FSYNC | FDSYNC | FRSYNC);
return (flags);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_read_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_read(struct vop_read_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_write_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_write(struct vop_write_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#if __FreeBSD_version >= 1300102
/*
* VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
* the comment above cache_fplookup for details.
*/
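/*
Returning EAGAIN here aborts the lockless (SMR-protected) lookup
and makes the VFS fall back to the regular, vnode-locked path, so
only cheap checks on the atomically loaded z_pflags are performed.
*/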
static int
zfs_freebsd_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
vnode_t *vp;
znode_t *zp;
uint64_t pflags;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL))
return (EAGAIN);
pflags = atomic_load_64(&zp->z_pflags);
if (pflags & ZFS_AV_QUARANTINED)
return (EAGAIN);
if (pflags & ZFS_XATTR)
return (EAGAIN);
if ((pflags & ZFS_NO_EXECS_DENIED) == 0)
return (EAGAIN);
return (0);
}
#endif
#if __FreeBSD_version >= 1300139
static int
zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
vnode_t *vp;
znode_t *zp;
char *target;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL)) {
return (EAGAIN);
}
target = atomic_load_consume_ptr(&zp->z_cached_symlink);
if (target == NULL) {
return (EAGAIN);
}
return (cache_symlink_resolve(v->a_fpl, target, strlen(target)));
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_access_args {
struct vnode *a_vp;
accmode_t a_accmode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_access(struct vop_access_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
accmode_t accmode;
int error = 0;
if (ap->a_accmode == VEXEC) {
if (zfs_fastaccesschk_execute(zp, ap->a_cred) == 0)
return (0);
}
/*
ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
*/
accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0)
error = zfs_access(zp, accmode, 0, ap->a_cred);
/*
* VADMIN has to be handled by vaccess().
*/
if (error == 0) {
accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0) {
#if __FreeBSD_version >= 1300105
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred);
#else
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred, NULL);
#endif
}
}
/*
* For VEXEC, ensure that at least one execute bit is set for
* non-directories.
*/
if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
(zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
error = EACCES;
}
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_lookup(struct vop_lookup_args *ap, boolean_t cached)
{
struct componentname *cnp = ap->a_cnp;
char nm[NAME_MAX + 1];
ASSERT3U(cnp->cn_namelen, <, sizeof (nm));
strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof (nm)));
return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
cnp->cn_cred, 0, cached));
}
static int
zfs_freebsd_cachedlookup(struct vop_cachedlookup_args *ap)
{
return (zfs_freebsd_lookup((struct vop_lookup_args *)ap, B_TRUE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
static int
zfs_cache_lookup(struct vop_lookup_args *ap)
{
zfsvfs_t *zfsvfs;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
if (zfsvfs->z_use_namecache)
return (vfs_cache_lookup(ap));
else
return (zfs_freebsd_lookup(ap, B_FALSE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_create_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_create(struct vop_create_args *ap)
{
zfsvfs_t *zfsvfs;
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc, mode;
ASSERT(cnp->cn_flags & SAVENAME);
vattr_init_mask(vap);
mode = vap->va_mode & ALLPERMS;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
*ap->a_vpp = NULL;
rc = zfs_create(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap, !EXCL, mode,
&zp, cnp->cn_cred, 0 /* flag */, NULL /* vsecattr */);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
if (zfsvfs->z_use_namecache &&
rc == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_remove_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_remove(struct vop_remove_args *ap)
{
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
return (zfs_remove_(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
ap->a_cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_mkdir_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_mkdir(struct vop_mkdir_args *ap)
{
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc;
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_mkdir(VTOZ(ap->a_dvp), ap->a_cnp->cn_nameptr, vap, &zp,
ap->a_cnp->cn_cred, 0, NULL);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rmdir_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_rmdir(struct vop_rmdir_args *ap)
{
struct componentname *cnp = ap->a_cnp;
ASSERT(cnp->cn_flags & SAVENAME);
return (zfs_rmdir_(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readdir_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
int *a_ncookies;
- ulong_t **a_cookies;
+ cookie_t **a_cookies;
};
#endif
static int
zfs_freebsd_readdir(struct vop_readdir_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fsync_args {
struct vnode *a_vp;
int a_waitfor;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
vop_stdfsync(ap);
return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_getattr(struct vop_getattr_args *ap)
{
vattr_t *vap = ap->a_vap;
xvattr_t xvap;
ulong_t fflags = 0;
int error;
xva_init(&xvap);
xvap.xva_vattr = *vap;
xvap.xva_vattr.va_mask |= AT_XVATTR;
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&xvap, XAT_APPENDONLY);
XVA_SET_REQ(&xvap, XAT_NOUNLINK);
XVA_SET_REQ(&xvap, XAT_NODUMP);
XVA_SET_REQ(&xvap, XAT_READONLY);
XVA_SET_REQ(&xvap, XAT_ARCHIVE);
XVA_SET_REQ(&xvap, XAT_SYSTEM);
XVA_SET_REQ(&xvap, XAT_HIDDEN);
XVA_SET_REQ(&xvap, XAT_REPARSE);
XVA_SET_REQ(&xvap, XAT_OFFLINE);
XVA_SET_REQ(&xvap, XAT_SPARSE);
error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred);
if (error != 0)
return (error);
/* Convert ZFS xattr into chflags. */
#define FLAG_CHECK(fflag, xflag, xfield) do { \
if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
fflags |= (fflag); \
} while (0)
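/*
FLAG_CHECK() sets the chflags(2) bit fflag when the corresponding
ZFS attribute was both returned by zfs_getattr() and non-zero.
*/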
FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHECK(UF_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHECK
*vap = xvap.xva_vattr;
vap->va_flags = fflags;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_setattr(struct vop_setattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
cred_t *cred = ap->a_cred;
xvattr_t xvap;
ulong_t fflags;
uint64_t zflags;
vattr_init_mask(vap);
vap->va_mask &= ~AT_NOSET;
xva_init(&xvap);
xvap.xva_vattr = *vap;
zflags = VTOZ(vp)->z_pflags;
if (vap->va_flags != VNOVAL) {
zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
int error;
if (zfsvfs->z_use_fuids == B_FALSE)
return (EOPNOTSUPP);
fflags = vap->va_flags;
/*
* XXX KDM
* We need to figure out whether it makes sense to allow
* UF_REPARSE through, since we don't really have other
* facilities to handle reparse points and zfs_setattr()
* doesn't currently allow setting that attribute anyway.
*/
if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
UF_OFFLINE|UF_SPARSE)) != 0)
return (EOPNOTSUPP);
/*
* Unprivileged processes are not permitted to unset system
* flags, or modify flags if any system flags are set.
* Privileged non-jail processes may not modify system flags
* if securelevel > 0 and any existing system flags are set.
* Privileged jail processes behave like privileged non-jail
* processes if the PR_ALLOW_CHFLAGS permission bit is set;
* otherwise, they behave like unprivileged processes.
*/
if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
spl_priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
error = securelevel_gt(cred, 0);
if (error != 0)
return (error);
}
} else {
/*
* Callers may only modify the file flags on
* objects they have VADMIN rights for.
*/
if ((error = VOP_ACCESS(vp, VADMIN, cred,
curthread)) != 0)
return (error);
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY |
ZFS_NOUNLINK)) {
return (EPERM);
}
if (fflags &
(SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
return (EPERM);
}
}
#define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
if (((fflags & (fflag)) && !(zflags & (zflag))) || \
((zflags & (zflag)) && !(fflags & (fflag)))) { \
XVA_SET_REQ(&xvap, (xflag)); \
(xfield) = ((fflags & (fflag)) != 0); \
} \
} while (0)
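/*
FLAG_CHANGE() only requests an update for an attribute when the
chflags(2) bit and the current ZFS flag disagree, so flags that are
already in the desired state are left untouched.
*/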
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHANGE
}
if (vap->va_birthtime.tv_sec != VNOVAL) {
xvap.xva_vattr.va_mask |= AT_XVATTR;
XVA_SET_REQ(&xvap, XAT_CREATETIME);
}
return (zfs_setattr(VTOZ(vp), (vattr_t *)&xvap, 0, cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rename_args {
struct vnode *a_fdvp;
struct vnode *a_fvp;
struct componentname *a_fcnp;
struct vnode *a_tdvp;
struct vnode *a_tvp;
struct componentname *a_tcnp;
};
#endif
static int
zfs_freebsd_rename(struct vop_rename_args *ap)
{
vnode_t *fdvp = ap->a_fdvp;
vnode_t *fvp = ap->a_fvp;
vnode_t *tdvp = ap->a_tdvp;
vnode_t *tvp = ap->a_tvp;
int error;
ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
error = zfs_do_rename(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
ap->a_tcnp, ap->a_fcnp->cn_cred);
vrele(fdvp);
vrele(fvp);
vrele(tdvp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_symlink_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
char *a_target;
};
#endif
static int
zfs_freebsd_symlink(struct vop_symlink_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
#if __FreeBSD_version >= 1300139
char *symlink;
size_t symlink_len;
#endif
int rc;
ASSERT(cnp->cn_flags & SAVENAME);
vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_symlink(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap,
ap->a_target, &zp, cnp->cn_cred, 0 /* flags */);
if (rc == 0) {
*ap->a_vpp = ZTOV(zp);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
#if __FreeBSD_version >= 1300139
MPASS(zp->z_cached_symlink == NULL);
symlink_len = strlen(ap->a_target);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, ap->a_target, symlink_len);
symlink[symlink_len] = '\0';
atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)symlink);
}
#endif
}
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readlink_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_readlink(struct vop_readlink_args *ap)
{
zfs_uio_t uio;
int error;
#if __FreeBSD_version >= 1300139
znode_t *zp = VTOZ(ap->a_vp);
char *symlink, *base;
size_t symlink_len;
bool trycache;
#endif
zfs_uio_init(&uio, ap->a_uio);
#if __FreeBSD_version >= 1300139
trycache = false;
if (zfs_uio_segflg(&uio) == UIO_SYSSPACE &&
zfs_uio_iovcnt(&uio) == 1) {
base = zfs_uio_iovbase(&uio, 0);
symlink_len = zfs_uio_iovlen(&uio, 0);
trycache = true;
}
#endif
error = zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL);
#if __FreeBSD_version >= 1300139
if (atomic_load_ptr(&zp->z_cached_symlink) != NULL ||
error != 0 || !trycache) {
return (error);
}
symlink_len -= zfs_uio_resid(&uio);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, base, symlink_len);
symlink[symlink_len] = '\0';
if (!atomic_cmpset_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)NULL, (uintptr_t)symlink)) {
cache_symlink_free(symlink, symlink_len + 1);
}
}
#endif
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_link_args {
struct vnode *a_tdvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_link(struct vop_link_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *vp = ap->a_vp;
vnode_t *tdvp = ap->a_tdvp;
if (tdvp->v_mount != vp->v_mount)
return (EXDEV);
ASSERT(cnp->cn_flags & SAVENAME);
return (zfs_link(VTOZ(tdvp), VTOZ(vp),
cnp->cn_nameptr, cnp->cn_cred, 0));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
#else
zfs_inactive(vp, ap->a_td->td_ucred, NULL);
#endif
return (0);
}
#if __FreeBSD_version >= 1300042
#ifndef _SYS_SYSPROTO_H_
struct vop_need_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_need_inactive(struct vop_need_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int need;
if (vn_need_pageq_flush(vp))
return (1);
if (!ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs))
return (1);
need = (zp->z_sa_hdl == NULL || zp->z_unlinked || zp->z_atime_dirty);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
return (need);
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT3P(zp, !=, NULL);
#if __FreeBSD_version < 1300042
/* Destroy the vm object and flush associated pages. */
vnode_destroy_vobject(vp);
#endif
/*
z_teardown_inactive_lock protects against a race with
zfs_znode_dmu_fini in zfsvfs_teardown during
force unmount.
*/
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL)
zfs_znode_free(zp);
else
zfs_zinactive(zp);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vp->v_data = NULL;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfs_freebsd_fid(struct vop_fid_args *ap)
{
return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_pathconf_args {
struct vnode *a_vp;
int a_name;
register_t *a_retval;
} *ap;
#endif
static int
zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
{
ulong_t val;
int error;
error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
curthread->td_ucred, NULL);
if (error == 0) {
*ap->a_retval = val;
return (error);
}
if (error != EOPNOTSUPP)
return (error);
switch (ap->a_name) {
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
case _PC_PIPE_BUF:
if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) {
*ap->a_retval = PIPE_BUF;
return (0);
}
return (EINVAL);
default:
return (vop_stdpathconf(ap));
}
}
/*
FreeBSD's extended attribute namespaces define the file name prefix
used for a ZFS extended attribute name:

NAMESPACE PREFIX
system freebsd:system:
user (none, can be used to access ZFS fsattr(5) attributes
created on Solaris)
*/
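/*
For example, an attribute "md5" (an arbitrary, illustrative name) in
the system namespace is stored under "freebsd:system:md5", while the
same name in the user namespace is stored under the bare name "md5".
*/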
static int
zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
size_t size)
{
const char *namespace, *prefix, *suffix;
/* We don't allow the '/' character in attribute names. */
if (strchr(name, '/') != NULL)
return (SET_ERROR(EINVAL));
/* We don't allow attribute names that start with the "freebsd:" prefix. */
if (strncmp(name, "freebsd:", 8) == 0)
return (SET_ERROR(EINVAL));
bzero(attrname, size);
switch (attrnamespace) {
case EXTATTR_NAMESPACE_USER:
#if 0
prefix = "freebsd:";
namespace = EXTATTR_NAMESPACE_USER_STRING;
suffix = ":";
#else
/*
* This is the default namespace by which we can access all
* attributes created on Solaris.
*/
prefix = namespace = suffix = "";
#endif
break;
case EXTATTR_NAMESPACE_SYSTEM:
prefix = "freebsd:";
namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
suffix = ":";
break;
case EXTATTR_NAMESPACE_EMPTY:
default:
return (SET_ERROR(EINVAL));
}
if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
name) >= size) {
return (SET_ERROR(ENAMETOOLONG));
}
return (0);
}
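/*
zfs_ensure_xattr_cached() makes sure zp->z_xattr_cached is populated.
If only the reader lock is held it upgrades to a writer (possibly
dropping and re-acquiring the lock), re-checks the cache, and
downgrades back before returning, so callers keep at least their
reader lock across the call.
*/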
static int
zfs_ensure_xattr_cached(znode_t *zp)
{
int error = 0;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zp->z_xattr_cached != NULL)
return (0);
if (rw_write_held(&zp->z_xattr_lock))
return (zfs_sa_get_xattr(zp));
if (!rw_tryupgrade(&zp->z_xattr_lock)) {
rw_exit(&zp->z_xattr_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
}
if (zp->z_xattr_cached == NULL)
error = zfs_sa_get_xattr(zp);
rw_downgrade(&zp->z_xattr_lock);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_getextattr_dir(struct vop_getextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
flags = FREAD;
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
#endif
error = vn_open_cred(&nd, &flags, 0, VN_OPEN_INVFS, ap->a_cred, NULL);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error != 0)
return (error);
if (ap->a_size != NULL) {
error = VOP_GETATTR(vp, &va, ap->a_cred);
if (error == 0)
*ap->a_size = (size_t)va.va_size;
} else if (ap->a_uio != NULL)
error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_getextattr_sa(struct vop_getextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
uchar_t *nv_value;
uint_t nv_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
error = nvlist_lookup_byte_array(zp->z_xattr_cached, attrname,
&nv_value, &nv_size);
if (error)
return (error);
if (ap->a_size != NULL)
*ap->a_size = nv_size;
else if (ap->a_uio != NULL)
error = uiomove(nv_value, nv_size, ap->a_uio);
return (error);
}
/*
* Vnode operation to retrieve a named extended attribute.
*/
static int
zfs_getextattr(struct vop_getextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (error);
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname));
if (error != 0)
return (error);
error = ENOENT;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_getextattr_sa(ap, attrname);
if (error == ENOENT)
error = zfs_getextattr_dir(ap, attrname);
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_deleteextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_deleteextattr_dir(struct vop_deleteextattr_args *ap, const char *attrname)
{
struct nameidata nd;
vnode_t *xvp = NULL, *vp;
int error;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp, ap->a_td);
#else
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp);
#endif
error = namei(&nd);
vp = nd.ni_vp;
if (error != 0) {
NDFREE(&nd, NDF_ONLY_PNBUF);
return (error);
}
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
if (vp == nd.ni_dvp)
vrele(vp);
else
vput(vp);
return (error);
}
static int
zfs_deleteextattr_sa(struct vop_deleteextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
error = nvlist_remove(nvl, attrname, DATA_TYPE_BYTE_ARRAY);
if (error == 0)
error = zfs_sa_set_xattr(zp);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
/*
* Vnode operation to remove a named attribute.
*/
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (error);
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname));
if (error != 0)
return (error);
size_t size = 0;
struct vop_getextattr_args vga = {
.a_vp = ap->a_vp,
.a_size = &size,
.a_cred = ap->a_cred,
.a_td = ap->a_td,
};
error = ENOENT;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zfs_getextattr_sa(&vga, attrname);
if (error == 0)
error = zfs_deleteextattr_sa(ap, attrname);
}
if (error == ENOENT) {
error = zfs_getextattr_dir(&vga, attrname);
if (error == 0)
error = zfs_deleteextattr_dir(ap, attrname);
}
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_setextattr_dir(struct vop_setextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR | CREATE_XATTR_DIR, B_FALSE);
if (error != 0)
return (error);
flags = FFLAGS(O_WRONLY | O_CREAT);
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp);
#endif
error = vn_open_cred(&nd, &flags, 0600, VN_OPEN_INVFS, ap->a_cred,
NULL);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error != 0)
return (error);
VATTR_NULL(&va);
va.va_size = 0;
error = VOP_SETATTR(vp, &va, ap->a_cred);
if (error == 0)
VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_setextattr_sa(struct vop_setextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
size_t sa_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
size_t entry_size = ap->a_uio->uio_resid;
if (entry_size > DXATTR_MAX_ENTRY_SIZE)
return (SET_ERROR(EFBIG));
error = nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error != 0)
return (error);
if (sa_size > DXATTR_MAX_SA_SIZE)
return (SET_ERROR(EFBIG));
uchar_t *buf = kmem_alloc(entry_size, KM_SLEEP);
error = uiomove(buf, entry_size, ap->a_uio);
if (error == 0)
error = nvlist_add_byte_array(nvl, attrname, buf, entry_size);
kmem_free(buf, entry_size);
if (error == 0)
error = zfs_sa_set_xattr(zp);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
/*
* Vnode operation to set a named attribute.
*/
static int
zfs_setextattr(struct vop_setextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (error);
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname));
if (error != 0)
return (error);
struct vop_deleteextattr_args vda = {
.a_vp = ap->a_vp,
.a_cred = ap->a_cred,
.a_td = ap->a_td,
};
error = ENOENT;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zfsvfs->z_use_sa && zp->z_is_sa && zfsvfs->z_xattr_sa) {
error = zfs_setextattr_sa(ap, attrname);
if (error == 0)
/*
* Successfully put into SA, we need to clear the one
* in dir if present.
*/
zfs_deleteextattr_dir(&vda, attrname);
}
if (error) {
error = zfs_setextattr_dir(ap, attrname);
if (error == 0 && zp->z_is_sa)
/*
* Successfully put into dir, we need to clear the one
* in SA if present.
*/
zfs_deleteextattr_sa(&vda, attrname);
}
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_listextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_listextattr_dir(struct vop_listextattr_args *ap, const char *attrprefix)
{
struct thread *td = ap->a_td;
struct nameidata nd;
uint8_t dirbuf[sizeof (struct dirent)];
struct iovec aiov;
struct uio auio;
vnode_t *xvp = NULL, *vp;
int error, eof;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred,
LOOKUP_XATTR, B_FALSE);
if (error != 0) {
/*
* ENOATTR means that the EA directory does not yet exist,
* i.e. there are no extended attributes there.
*/
if (error == ENOATTR)
error = 0;
return (error);
}
#if __FreeBSD_version < 1400043
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp, td);
#else
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp);
#endif
error = namei(&nd);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error != 0)
return (error);
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
auio.uio_rw = UIO_READ;
auio.uio_offset = 0;
size_t plen = strlen(attrprefix);
do {
aiov.iov_base = (void *)dirbuf;
aiov.iov_len = sizeof (dirbuf);
auio.uio_resid = sizeof (dirbuf);
error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
if (error != 0)
break;
int done = sizeof (dirbuf) - auio.uio_resid;
for (int pos = 0; pos < done; ) {
struct dirent *dp = (struct dirent *)(dirbuf + pos);
pos += dp->d_reclen;
/*
* XXX: Temporarily we also accept DT_UNKNOWN, as this
* is what we get when an attribute was created on Solaris.
*/
if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
continue;
else if (plen == 0 &&
strncmp(dp->d_name, "freebsd:", 8) == 0)
continue;
else if (strncmp(dp->d_name, attrprefix, plen) != 0)
continue;
uint8_t nlen = dp->d_namlen - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* Format of extattr name entry is one byte for
* length and the rest for name.
*/
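/*
 * For example, an entry whose stripped name is "comment" would be
 * emitted as the length byte 0x07 followed by the seven bytes
 * "comment"; the attrprefix (plen bytes) has already been removed
 * from d_name above.
 */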
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = dp->d_name + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0)
break;
}
}
} while (!eof && error == 0);
vput(vp);
return (error);
}
static int
zfs_listextattr_sa(struct vop_listextattr_args *ap, const char *attrprefix)
{
znode_t *zp = VTOZ(ap->a_vp);
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
size_t plen = strlen(attrprefix);
nvpair_t *nvp = NULL;
while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
const char *name = nvpair_name(nvp);
if (plen == 0 && strncmp(name, "freebsd:", 8) == 0)
continue;
else if (strncmp(name, attrprefix, plen) != 0)
continue;
uint8_t nlen = strlen(name) - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* Format of extattr name entry is one byte for
* length and the rest for name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = __DECONST(char *, name) + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0)
break;
}
}
return (error);
}
/*
* Vnode operation to retrieve extended attributes on a vnode.
*/
static int
zfs_listextattr(struct vop_listextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrprefix[16];
int error;
if (ap->a_size != NULL)
*ap->a_size = 0;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (error);
error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
sizeof (attrprefix));
if (error != 0)
return (error);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_listextattr_sa(ap, attrprefix);
if (error == 0)
error = zfs_listextattr_dir(ap, attrprefix);
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_getacl(struct vop_getacl_args *ap)
{
int error;
vsecattr_t vsecattr;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
&vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
vsecattr.vsa_aclcnt);
if (vsecattr.vsa_aclentp != NULL)
kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_setacl(struct vop_setacl_args *ap)
{
int error;
vsecattr_t vsecattr;
int aclbsize; /* size of acl list in bytes */
aclent_t *aaclp;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
if (ap->a_aclp == NULL)
return (EINVAL);
if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
return (EINVAL);
/*
* With NFSv4 ACLs, chmod(2) may need to add additional entries,
* splitting every entry into two and appending "canonical six"
* entries at the end. Don't allow for setting an ACL that would
* cause chmod(2) to run out of ACL entries.
*/
if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
return (ENOSPC);
error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
if (error != 0)
return (error);
vsecattr.vsa_mask = VSA_ACE;
aclbsize = ap->a_aclp->acl_cnt * sizeof (ace_t);
vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
aaclp = vsecattr.vsa_aclentp;
vsecattr.vsa_aclentsz = aclbsize;
aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
error = zfs_setsecattr(VTOZ(ap->a_vp), &vsecattr, 0, ap->a_cred);
kmem_free(aaclp, aclbsize);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_aclcheck_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_aclcheck(struct vop_aclcheck_args *ap)
{
return (EOPNOTSUPP);
}
static int
zfs_vptocnp(struct vop_vptocnp_args *ap)
{
vnode_t *covered_vp;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
znode_t *zp = VTOZ(vp);
int ltype;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/*
* If we are a snapshot mounted under .zfs, run the operation
* on the covered vnode.
*/
if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
char name[MAXNAMLEN + 1];
znode_t *dzp;
size_t len;
error = zfs_znode_parent_and_name(zp, &dzp, name);
if (error == 0) {
len = strlen(name);
if (*ap->a_buflen < len)
error = SET_ERROR(ENOMEM);
}
if (error == 0) {
*ap->a_buflen -= len;
bcopy(name, ap->a_buf + *ap->a_buflen, len);
*ap->a_vpp = ZTOV(dzp);
}
ZFS_EXIT(zfsvfs);
return (error);
}
ZFS_EXIT(zfsvfs);
covered_vp = vp->v_mount->mnt_vnodecovered;
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(covered_vp);
#else
vhold(covered_vp);
#endif
ltype = VOP_ISLOCKED(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300045
error = vget_finish(covered_vp, LK_SHARED, vs);
#else
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
#if __FreeBSD_version >= 1300123
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
ap->a_buflen);
#else
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
ap->a_buf, ap->a_buflen);
#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
if (VN_IS_DOOMED(vp))
error = SET_ERROR(ENOENT);
return (error);
}
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;
struct vop_vector zfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_inactive = zfs_freebsd_inactive,
#if __FreeBSD_version >= 1300042
.vop_need_inactive = zfs_freebsd_need_inactive,
#endif
.vop_reclaim = zfs_freebsd_reclaim,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_allocate = VOP_EINVAL,
.vop_lookup = zfs_cache_lookup,
.vop_cachedlookup = zfs_freebsd_cachedlookup,
.vop_getattr = zfs_freebsd_getattr,
.vop_setattr = zfs_freebsd_setattr,
.vop_create = zfs_freebsd_create,
.vop_mknod = (vop_mknod_t *)zfs_freebsd_create,
.vop_mkdir = zfs_freebsd_mkdir,
.vop_readdir = zfs_freebsd_readdir,
.vop_fsync = zfs_freebsd_fsync,
.vop_open = zfs_freebsd_open,
.vop_close = zfs_freebsd_close,
.vop_rmdir = zfs_freebsd_rmdir,
.vop_ioctl = zfs_freebsd_ioctl,
.vop_link = zfs_freebsd_link,
.vop_symlink = zfs_freebsd_symlink,
.vop_readlink = zfs_freebsd_readlink,
.vop_read = zfs_freebsd_read,
.vop_write = zfs_freebsd_write,
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = zfs_freebsd_bmap,
.vop_fid = zfs_freebsd_fid,
.vop_getextattr = zfs_getextattr,
.vop_deleteextattr = zfs_deleteextattr,
.vop_setextattr = zfs_setextattr,
.vop_listextattr = zfs_listextattr,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
.vop_getpages = zfs_freebsd_getpages,
.vop_putpages = zfs_freebsd_putpages,
.vop_vptocnp = zfs_vptocnp,
#if __FreeBSD_version >= 1300064
.vop_lock1 = vop_lock,
.vop_unlock = vop_unlock,
.vop_islocked = vop_islocked,
#endif
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_vnodeops);
struct vop_vector zfs_fifoops = {
.vop_default = &fifo_specops,
.vop_fsync = zfs_freebsd_fsync,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_getattr = zfs_freebsd_getattr,
.vop_inactive = zfs_freebsd_inactive,
.vop_read = VOP_PANIC,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_setattr = zfs_freebsd_setattr,
.vop_write = VOP_PANIC,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_fid = zfs_freebsd_fid,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_fifoops);
/*
* special share hidden files vnode operations template
*/
struct vop_vector zfs_shareops = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_access = zfs_freebsd_access,
.vop_inactive = zfs_freebsd_inactive,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_fid = zfs_freebsd_fid,
.vop_pathconf = zfs_freebsd_pathconf,
#if __FreeBSD_version >= 1400043
.vop_add_writecount = vop_stdadd_writecount_nomsync,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_shareops);
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
index aeb42b304e73..9e0ab52a2ea2 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zio_crypt.c
@@ -1,1826 +1,1840 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
*/
#include <sys/zio_crypt.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
#include <sys/hkdf.h>
/*
* This file is responsible for handling all of the details of generating
* encryption parameters and performing encryption and authentication.
*
* BLOCK ENCRYPTION PARAMETERS:
* Encryption /Authentication Algorithm Suite (crypt):
* The encryption algorithm, mode, and key length we are going to use. We
* currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
* keys. All authentication is currently done with SHA512-HMAC.
*
* Plaintext:
* The unencrypted data that we want to encrypt.
*
* Initialization Vector (IV):
* An initialization vector for the encryption algorithms. This is used to
* "tweak" the encryption algorithms so that two blocks of the same data are
* encrypted into different ciphertext outputs, thus obfuscating block patterns.
* The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
* never reused with the same encryption key. This value is stored unencrypted
* and must simply be provided to the decryption function. We use a 96 bit IV
* (as recommended by NIST) for all block encryption. For non-dedup blocks we
* derive the IV randomly. The first 64 bits of the IV are stored in the second
* word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
* blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
* of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
* of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
* level 0 blocks is the number of allocated dnodes in that block. The on-disk
* format supports at most 2^15 slots per L0 dnode block, because the maximum
* block size is 16MB (2^24). In either case, for level 0 blocks this number
* will still be smaller than UINT32_MAX so it is safe to store the IV in the
* top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
* for the dnode code.
*
* Master key:
* This is the most important secret data of an encrypted dataset. It is used
* along with the salt to generate the actual encryption keys via HKDF. We
* do not use the master key to directly encrypt any data because there are
* theoretical limits on how much data can actually be safely encrypted with
* any encryption mode. The master key is stored encrypted on disk with the
* user's wrapping key. Its length is determined by the encryption algorithm.
* For details on how this is stored see the block comment in dsl_crypt.c
*
* Salt:
* Used as an input to the HKDF function, along with the master key. We use a
* 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
* can be used for encrypting many blocks, so we cache the current salt and the
* associated derived key in zio_crypt_t so we do not need to derive it again
* needlessly.
*
* Encryption Key:
* A secret binary key, generated from an HKDF function used to encrypt and
* decrypt data.
*
* Message Authentication Code (MAC)
* The MAC is an output of authenticated encryption modes such as AES-GCM and
* AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
* data on disk and return garbage to the application. Effectively, it is a
* checksum that can not be reproduced by an attacker. We store the MAC in the
* second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
* regular checksum of the ciphertext which can be used for scrubbing.
*
* OBJECT AUTHENTICATION:
* Some object types, such as DMU_OT_MASTER_NODE cannot be encrypted because
* they contain some info that always needs to be readable. To prevent this
* data from being altered, we authenticate this data using SHA512-HMAC. This
* will produce a MAC (similar to the one produced via encryption) which can
* be used to verify the object was not modified. HMACs do not require key
* rotation or IVs, so we can keep up to the full 3 copies of authenticated
* data.
*
* ZIL ENCRYPTION:
* ZIL blocks have their bp written to disk ahead of the associated data, so we
* cannot store the MAC there as we normally do. For these blocks the MAC is
* stored in the embedded checksum within the zil_chain_t header. The salt and
* IV are generated for the block on bp allocation instead of at encryption
* time. In addition, ZIL blocks have some pieces that must be left in plaintext
* for claiming even though all of the sensitive user data still needs to be
* encrypted. The function zio_crypt_init_uios_zil() handles parsing which
* pieces of the block need to be encrypted. All data that is not encrypted is
* authenticated using the AAD mechanisms that the supported encryption modes
* provide for. In order to preserve the semantics of the ZIL for encrypted
* datasets, the ZIL is not protected at the objset level as described below.
*
* DNODE ENCRYPTION:
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
* which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
* Up to this point, everything we have encrypted and authenticated has been
* at level 0 (or -2 for the ZIL). If we did not do any further work the
* on-disk format would be susceptible to attacks that deleted or rearranged
* the order of level 0 blocks. Ideally, the cleanest solution would be to
* maintain a tree of authentication MACs going up the bp tree. However, this
* presents a problem for raw sends. Send files do not send information about
* indirect blocks so there would be no convenient way to transfer the MACs and
* they cannot be recalculated on the receive side without the master key which
* would defeat one of the purposes of raw sends in the first place. Instead,
* for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
* from the level below. We also include some portable fields from blk_prop such
* as the lsize and compression algorithm to prevent the data from being
* misinterpreted.
*
* At the objset level, we maintain 2 separate 256 bit MACs in the
* objset_phys_t. The first one is "portable" and is the logical root of the
* MAC tree maintained in the metadnode's bps. The second is "local" and is
* used as the root MAC for the user accounting objects, which are also not
* transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
* of the send file. The useraccounting code ensures that the useraccounting
* info is not present upon a receive, so the local MAC can simply be cleared
* out at that time. For more info about objset_phys_t authentication, see
* zio_crypt_do_objset_hmacs().
*
* CONSIDERATIONS FOR DEDUP:
* In order for dedup to work, blocks that we want to dedup with one another
* need to use the same IV and encryption key, so that they will have the same
* ciphertext. Normally, one should never reuse an IV with the same encryption
* key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
* blocks. In this case, however, since we are using the same plaintext as
* well, all that we end up with is a duplicate of the original ciphertext we
* already had. As a result, an attacker with read access to the raw disk will
* be able to tell which blocks are the same but this information is given away
* by dedup anyway. In order to get the same IVs and encryption keys for
* equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
* here so that a reproducible checksum of the plaintext is never available to
* the attacker. The HMAC key is kept alongside the master key, encrypted on
* disk. The first 64 bits of the HMAC are used in place of the random salt, and
* the next 96 bits are used as the IV. As a result of this mechanism, dedup
* will only work within a clone family since encrypted dedup requires use of
* the same master and HMAC keys.
*/
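/*
 * As a rough sketch of the dedup derivation described above (see
 * zio_crypt_generate_iv_salt_dedup() below): a SHA512-HMAC of the
 * plaintext is computed with the dedicated HMAC key, its first
 * ZIO_DATA_SALT_LEN (8) bytes become the salt and the following
 * ZIO_DATA_IV_LEN (12) bytes become the 96 bit IV.
 */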
/*
* After encrypting many blocks with the same key we may start to run up
* against the theoretical limits of how much data can securely be encrypted
* with a single key using the supported encryption modes. The most obvious
* limitation is that our risk of generating 2 equivalent 96 bit IVs increases
* the more IVs we generate (which both GCM and CCM modes strictly forbid).
* This risk actually grows surprisingly quickly over time according to the
* Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
* generated n IVs with a cryptographically secure RNG, the approximate
* probability p(n) of a collision is given as:
*
* p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
*
* [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
*
* Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion
* we must not write more than 398,065,730 blocks with the same encryption key.
* Therefore, we rotate our keys after 400,000,000 blocks have been written by
* generating a new random 64 bit salt for our HKDF encryption key generation
* function.
*/
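/*
 * Back-of-the-envelope check of that bound: for small exponents
 * 1 - e^(-x) ~= x, so requiring n*(n-1)/(2*(2^96)) <= 10^-12 gives
 * n <= sqrt(2 * 2^96 * 10^-12) ~= sqrt(1.585 * 10^17) ~= 3.98 * 10^8,
 * which is where the 398,065,730 figure above comes from.
 */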
#define ZFS_KEY_MAX_SALT_USES_DEFAULT 400000000
#define ZFS_CURRENT_MAX_SALT_USES \
(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
/*
* Set to a nonzero value to cause zio_do_crypt_uio() to fail 1/this many
* calls, to test decryption error handling code paths.
*/
uint64_t zio_decrypt_fail_fraction = 0;
typedef struct blkptr_auth_buf {
uint64_t bab_prop; /* blk_prop - portable mask */
uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
uint64_t bab_pad; /* reserved for future use */
} blkptr_auth_buf_t;
zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
{"", ZC_TYPE_NONE, 0, "inherit"},
{"", ZC_TYPE_NONE, 0, "on"},
{"", ZC_TYPE_NONE, 0, "off"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
static void
zio_crypt_key_destroy_early(zio_crypt_key_t *key)
{
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
bzero(&key->zk_session, sizeof (key->zk_session));
/* zero out sensitive data */
bzero(key, sizeof (zio_crypt_key_t));
}
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
freebsd_crypt_freesession(&key->zk_session);
zio_crypt_key_destroy_early(key);
}
int
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
{
int ret;
crypto_mechanism_t mech __unused;
uint_t keydata_len;
zio_crypt_info_t *ci = NULL;
ASSERT3P(key, !=, NULL);
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
keydata_len = zio_crypt_table[crypt].ci_keylen;
bzero(key, sizeof (zio_crypt_key_t));
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_master_keydata, keydata_len);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for the ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = &key->zk_hmac_key;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
ret = freebsd_crypt_newsession(&key->zk_session, ci,
&key->zk_current_key);
if (ret)
goto error;
key->zk_crypt = crypt;
key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy_early(key);
return (ret);
}
static int
zio_crypt_key_change_salt(zio_crypt_key_t *key)
{
int ret = 0;
uint8_t salt[ZIO_DATA_SALT_LEN];
crypto_mechanism_t mech __unused;
uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;
/* generate a new salt */
ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
rw_enter(&key->zk_salt_lock, RW_WRITER);
/* someone beat us to the salt rotation, just unlock and return */
if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
goto out_unlock;
/* derive the current key from the master key and the new salt */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
if (ret != 0)
goto out_unlock;
/* assign the salt and reset the usage count */
bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
freebsd_crypt_freesession(&key->zk_session);
ret = freebsd_crypt_newsession(&key->zk_session,
&zio_crypt_table[key->zk_crypt], &key->zk_current_key);
if (ret != 0)
goto out_unlock;
rw_exit(&key->zk_salt_lock);
return (0);
out_unlock:
rw_exit(&key->zk_salt_lock);
error:
return (ret);
}
/* See comment above zfs_key_max_salt_uses definition for details */
int
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
{
int ret;
boolean_t salt_change;
rw_enter(&key->zk_salt_lock, RW_READER);
bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
rw_exit(&key->zk_salt_lock);
if (salt_change) {
ret = zio_crypt_key_change_salt(key);
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
void *failed_decrypt_buf;
int failed_decrypt_size;
/*
* This function handles all encryption and decryption in zfs. When
* encrypting it expects puio to reference the plaintext and cuio to
* reference the ciphertext. cuio must have enough space for the
* ciphertext + room for a MAC. datalen should be the length of the
* plaintext / ciphertext alone.
*/
/*
* The implementation for FreeBSD's OpenCrypto.
*
* The big difference between ICP and FOC is that FOC uses a single
* buffer for input and output. This means that (for AES-GCM, the
* only one supported right now) the source must be copied into the
* destination, and the destination must have the AAD, and the tag/MAC,
* already associated with it. (Both implementations can use a uio.)
*
* Since the auth data is part of the iovec array, all we need to know
* is the length: 0 means there's no AAD.
*
*/
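/*
 * Rough sketch of the uio layout this implies for callers (see
 * zio_crypt_key_wrap() below for a concrete instance): iovec[0]
 * carries the AAD (auth_len bytes, possibly zero), the middle
 * iovecs carry the data that is encrypted or decrypted in place,
 * and the final iovec is reserved for the tag/MAC.
 */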
static int
zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
zfs_uio_t *uio, uint_t auth_len)
{
zio_crypt_info_t *ci;
int ret;
ci = &zio_crypt_table[crypt];
if (ci->ci_crypt_type != ZC_TYPE_GCM &&
ci->ci_crypt_type != ZC_TYPE_CCM)
return (ENOTSUP);
ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
datalen, auth_len);
if (ret != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Returning error %s\n",
__FUNCTION__, __LINE__, encrypt ? "EIO" : "ECKSUM");
#endif
ret = SET_ERROR(encrypt ? EIO : ECKSUM);
}
return (ret);
}
int
zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{
int ret;
uint64_t aad[3];
/*
* With OpenCrypto in FreeBSD, the same buffer is used for
* input and output. Also, the AAD (for AES-GCM at least)
* needs to logically go in front.
*/
zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4];
uint64_t crypt = key->zk_crypt;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
zfs_uio_init(&cuio, &cuio_s);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* generate iv for wrapping the master and hmac key */
ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
if (ret != 0)
goto error;
/*
* Since we only support one buffer, we need to copy
* the plain text (source) to the cipher buffer (dest).
* We set iovecs[0] -- the authentication data -- below.
*/
bcopy((void*)key->zk_master_keydata, keydata_out, keydata_len);
bcopy((void*)key->zk_hmac_keydata, hmac_keydata_out,
SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = keydata_out;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = hmac_keydata_out;
iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
iovecs[3].iov_base = mac;
iovecs[3].iov_len = WRAPPING_MAC_LEN;
/*
* Although we don't support writing to the old format, we do
* support rewrapping the key so that the user can move and
* quarantine datasets on the old format.
*/
if (key->zk_version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(key->zk_guid);
} else {
ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(key->zk_guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(key->zk_version);
}
iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len;
enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
zfs_uio_iovcnt(&cuio) = 4;
zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* encrypt the keys and store the resulting ciphertext and mac */
ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey,
iv, enc_len, &cuio, aad_len);
if (ret != 0)
goto error;
return (0);
error:
return (ret);
}
int
zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
uint8_t *mac, zio_crypt_key_t *key)
{
int ret;
uint64_t aad[3];
/*
* With OpenCrypto in FreeBSD, the same buffer is used for
* input and output. Also, the AAD (for AES-GCM at least)
* needs to logically go in front.
*/
zfs_uio_t cuio;
struct uio cuio_s;
iovec_t iovecs[4];
void *src, *dst;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
keydata_len = zio_crypt_table[crypt].ci_keylen;
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
zfs_uio_init(&cuio, &cuio_s);
/*
* Since we only support one buffer, we need to copy
* the encrypted buffer (source) to the plain buffer
* (dest). We set iovecs[0] -- the authentication data --
* below.
*/
dst = key->zk_master_keydata;
src = keydata;
bcopy(src, dst, keydata_len);
dst = key->zk_hmac_keydata;
src = hmac_keydata;
bcopy(src, dst, SHA512_HMAC_KEYLEN);
iovecs[1].iov_base = key->zk_master_keydata;
iovecs[1].iov_len = keydata_len;
iovecs[2].iov_base = key->zk_hmac_keydata;
iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
iovecs[3].iov_base = mac;
iovecs[3].iov_len = WRAPPING_MAC_LEN;
if (version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(guid);
} else {
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(version);
}
enc_len = keydata_len + SHA512_HMAC_KEYLEN;
iovecs[0].iov_base = aad;
iovecs[0].iov_len = aad_len;
GET_UIO_STRUCT(&cuio)->uio_iov = iovecs;
zfs_uio_iovcnt(&cuio) = 4;
zfs_uio_segflg(&cuio) = UIO_SYSSPACE;
/* decrypt the keys and store the result in the output buffers */
ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey,
iv, enc_len, &cuio, aad_len);
if (ret != 0)
goto error;
/* generate a fresh salt */
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
ret = freebsd_crypt_newsession(&key->zk_session,
&zio_crypt_table[crypt], &key->zk_current_key);
if (ret != 0)
goto error;
key->zk_crypt = crypt;
key->zk_version = version;
key->zk_guid = guid;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy_early(key);
return (ret);
}
int
zio_crypt_generate_iv(uint8_t *ivbuf)
{
int ret;
/* randomly generate the IV */
ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
if (ret != 0)
goto error;
return (0);
error:
bzero(ivbuf, ZIO_DATA_IV_LEN);
return (ret);
}
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
uint8_t *digestbuf, uint_t digestlen)
{
uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
crypto_mac(&key->zk_hmac_key, data, datalen,
raw_digestbuf, SHA512_DIGEST_LENGTH);
bcopy(raw_digestbuf, digestbuf, digestlen);
return (0);
}
int
zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
int ret;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
ret = zio_crypt_do_hmac(key, data, datalen,
digestbuf, SHA512_DIGEST_LENGTH);
if (ret != 0)
return (ret);
bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
return (0);
}
/*
* The following functions are used to encode and decode encryption parameters
* into blkptr_t and zil_header_t. The ICP wants to use these parameters as
* byte strings, which normally means that these strings would not need to deal
* with byteswapping at all. However, both blkptr_t and zil_header_t may be
* byteswapped by lower layers and so we must "undo" that byteswap here upon
* decoding and encoding in a non-native byteorder. These functions require
* that the byteorder bit is correct before being called.
*/
void
zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
bcopy(salt, &val64, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
bcopy(iv, &val64, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
void
zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_PROTECTED(bp));
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
bcopy(&val64, salt, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
bcopy(&val64, iv, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
}
}
void
zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
bcopy(mac, &val64, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
void
zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
bzero(mac, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
bcopy(&val64, mac, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
}
}
void
zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
void
zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
{
/*
* The ZIL MAC is embedded in the block it protects, which will
* not have been byteswapped by the time this function has been called.
* As a result, we don't need to worry about byteswapping the MAC.
*/
const zil_chain_t *zilc = data;
bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
/*
* This routine takes a block of dnodes (src_abd) and copies only the bonus
* buffers to the same offsets in the dst buffer. datalen should be the size
* of both the src_abd and the dst buffer (not just the length of the bonus
* buffers).
*/
void
zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
{
uint_t i, max_dnp = datalen >> DNODE_SHIFT;
uint8_t *src;
dnode_phys_t *dnp, *sdnp, *ddnp;
src = abd_borrow_buf_copy(src_abd, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
DN_MAX_BONUS_LEN(dnp));
}
}
abd_return_buf(src_abd, src, datalen);
}
/*
* This function decides what fields from blk_prop are included in
* the various on-disk MAC algorithms.
*/
static void
zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
{
int avoidlint = SPA_MINBLOCKSIZE;
/*
* Version 0 did not properly zero out all non-portable fields
* as it should have done. We maintain this code so that we can
* do read-only imports of pools on this version.
*/
if (version == 0) {
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
BP_SET_PSIZE(bp, avoidlint);
return;
}
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
/*
* The hole_birth feature might set these fields even if this bp
* is a hole. We zero them out here to guarantee that raw sends
* will function with or without the feature.
*/
if (BP_IS_HOLE(bp)) {
bp->blk_prop = 0ULL;
return;
}
/*
* At L0 we want to verify these fields to ensure that data blocks
* can not be reinterpreted. For instance, we do not want an attacker
* to trick us into returning raw lz4 compressed data to the user
* by modifying the compression bits. At higher levels, we cannot
* enforce this policy since raw sends do not convey any information
* about indirect blocks, so these values might be different on the
* receive side. Fortunately, this does not open any new attack
* vectors, since any alterations that can be made to a higher level
* bp must still verify the correct order of the layer below it.
*/
if (BP_GET_LEVEL(bp) != 0) {
BP_SET_BYTEORDER(bp, 0);
BP_SET_COMPRESS(bp, 0);
/*
* psize cannot be set to zero or it will trigger
* asserts, but the value doesn't really matter as
* long as it is constant.
*/
BP_SET_PSIZE(bp, avoidlint);
}
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
}
static void
zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
blkptr_auth_buf_t *bab, uint_t *bab_len)
{
blkptr_t tmpbp = *bp;
if (should_bswap)
byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));
ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
ASSERT0(BP_IS_EMBEDDED(&tmpbp));
zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);
/*
* We always MAC blk_prop in LE to ensure portability. This
* must be done after decoding the mac, since the endianness
* will get zero'd out here.
*/
zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
bab->bab_prop = LE_64(tmpbp.blk_prop);
bab->bab_pad = 0ULL;
/* version 0 did not include the padding */
*bab_len = sizeof (blkptr_auth_buf_t);
if (version == 0)
*bab_len -= sizeof (uint64_t);
}
static int
zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
crypto_mac_update(ctx, &bab, bab_len);
return (0);
}
static void
zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
SHA2Update(ctx, &bab, bab_len);
}
static void
zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
bcopy(&bab, *aadp, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
static int
zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, dnode_phys_t *dnp)
{
int ret, i;
dnode_phys_t *adnp;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
/* authenticate the core dnode (masking out non-portable bits) */
bcopy(dnp, tmp_dncore, sizeof (tmp_dncore));
adnp = (dnode_phys_t *)tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
adnp->dn_used = BSWAP_64(adnp->dn_used);
}
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
crypto_mac_update(ctx, adnp, sizeof (tmp_dncore));
for (i = 0; i < dnp->dn_nblkptr; i++) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, &dnp->dn_blkptr[i]);
if (ret != 0)
goto error;
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, DN_SPILL_BLKPTR(dnp));
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* objset_phys_t blocks introduce a number of exceptions to the normal
* authentication process. objset_phys_t's contain 2 separate HMACS for
* protecting the integrity of their data. The portable_mac protects the
* metadnode. This MAC can be sent with a raw send and protects against
* reordering of data within the metadnode. The local_mac protects the user
* accounting objects which are not sent from one system to another.
*
* In addition, objset blocks are the only blocks that can be modified and
* written to disk without the key loaded under certain circumstances. During
* zil_claim() we need to be able to update the zil_header_t to complete
* claiming log blocks and during raw receives we need to write out the
* portable_mac from the send file. Both of these actions are possible
* because these fields are not protected by either MAC so neither one will
* need to modify the MACs without the key. However, when the modified blocks
* are written out they will be byteswapped into the host machine's native
* endianness which will modify fields protected by the MAC. As a result, MAC
* calculation for objset blocks works slightly differently from other block
* types. Where other block types MAC the data in whatever endianness is
* written to disk, objset blocks always MAC little endian version of their
* values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
* and le_bswap indicates whether a byteswap is needed to get this block
* into little endian format.
*/
-/* ARGSUSED */
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
{
int ret;
struct hmac_ctx hash_ctx;
struct hmac_ctx *ctx = &hash_ctx;
objset_phys_t *osp = data;
uint64_t intval;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
/* calculate the portable MAC from the portable fields and metadnode */
crypto_mac_init(ctx, &key->zk_hmac_key);
/* add in the os_type */
intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* add in the portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
/* CONSTCOND */
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* add in fields from the metadnode */
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_meta_dnode);
if (ret)
goto error;
crypto_mac_final(ctx, raw_portable_mac, SHA512_DIGEST_LENGTH);
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
+ /*
+ * This is necessary here as we check next whether
+ * OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to
+ * decide if the local_mac should be zeroed out. That flag will always
+ * be set by dmu_objset_id_quota_upgrade_cb() and
+ * dmu_objset_userspace_upgrade_cb() if useraccounting has been
+ * completed.
+ */
+ intval = osp->os_flags;
+ if (should_bswap)
+ intval = BSWAP_64(intval);
+ boolean_t uacct_incomplete =
+ !(intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE);
+
/*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
- if ((datalen >= OBJSET_PHYS_SIZE_V3 &&
+ if (uacct_incomplete ||
+ (datalen >= OBJSET_PHYS_SIZE_V3 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
/* calculate the local MAC from the userused and groupused dnodes */
crypto_mac_init(ctx, &key->zk_hmac_key);
/* add in the non-portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
/* CONSTCOND */
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
crypto_mac_update(ctx, &intval, sizeof (uint64_t));
/* XXX check dnode type ... */
/* add in fields from the user accounting dnodes */
if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_userused_dnode);
if (ret)
goto error;
}
if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_groupused_dnode);
if (ret)
goto error;
}
if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
datalen >= OBJSET_PHYS_SIZE_V3) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_projectused_dnode);
if (ret)
goto error;
}
crypto_mac_final(ctx, raw_local_mac, SHA512_DIGEST_LENGTH);
bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (ret);
}
static void
zio_crypt_destroy_uio(zfs_uio_t *uio)
{
if (GET_UIO_STRUCT(uio)->uio_iov)
kmem_free(GET_UIO_STRUCT(uio)->uio_iov,
zfs_uio_iovcnt(uio) * sizeof (iovec_t));
}
/*
* This function parses an uncompressed indirect block and returns a checksum
* of all the portable fields from all of the contained bps. The portable
* fields are the MAC and all of the fields from blk_prop except for the dedup,
* checksum, and psize bits. For an explanation of the purpose of this, see
* the comment block on object set authentication.
*/
static int
zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
{
blkptr_t *bp;
int i, epb = datalen >> SPA_BLKPTRSHIFT;
SHA2_CTX ctx;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
/* checksum all of the MACs from the layer below */
SHA2Init(SHA512, &ctx);
for (i = 0, bp = buf; i < epb; i++, bp++) {
zio_crypt_bp_do_indrect_checksum_updates(&ctx, version,
byteswap, bp);
}
SHA2Final(digestbuf, &ctx);
if (generate) {
bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
#ifdef FCRYPTO_DEBUG
printf("%s(%d): Setting ECKSUM\n", __FUNCTION__, __LINE__);
#endif
return (SET_ERROR(ECKSUM));
}
return (0);
}
int
zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
/*
* Unfortunately, callers of this function will not always have
* easy access to the on-disk format version. This info is
* normally found in the DSL Crypto Key, but the checksum-of-MACs
* is expected to be verifiable even when the key isn't loaded.
* Here, instead of doing a ZAP lookup for the version for each
* zio, we simply try both existing formats.
*/
ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
if (ret == ECKSUM) {
ASSERT(!generate);
ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
buf, datalen, 0, byteswap, cksum);
}
return (ret);
}
int
zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
void *buf;
buf = abd_borrow_buf_copy(abd, datalen);
ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
byteswap, cksum);
abd_return_buf(abd, buf, datalen);
return (ret);
}
/*
* Special case handling routine for encrypting / decrypting ZIL blocks.
* We do not check for the older ZIL chain because the encryption feature
* was not available before the newer ZIL chain was introduced. The goal
* here is to encrypt everything except the blkptr_t of a lr_write_t and
* the zil_chain_t header. Everything that is not encrypted is authenticated.
*/
/*
* The OpenCrypto used in FreeBSD does not use separate source and
* destination buffers; instead, the same buffer is used. Further, to
* accommodate some of the drivers, the authbuf needs to be logically before
* the data. This means that we need to copy the source to the destination,
* and set up an extra iovec_t at the beginning to handle the authbuf.
* It also means we'll only return one zfs_uio_t.
*/
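/*
 * In terms of the iovec array built below, this works out roughly to:
 * iovec[0] carries the concatenated AAD (the zil_chain_t header minus
 * its embedded checksum, each lr_t header, and any TX_WRITE blkptrs),
 * one iovec per encrypted portion of each log record, and a final
 * empty iovec reserved for the MAC.
 */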
-/* ARGSUSED */
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt)
{
+ (void) puio;
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
iovec_t *dst_iovecs;
zil_chain_t *zilc;
lr_t *lr;
uint64_t txtype, lr_len;
uint_t crypt_len, nr_iovecs, vec;
uint_t aad_len = 0, total_len = 0;
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
/* Find the start and end record of the log block. */
zilc = (zil_chain_t *)src;
slrp = src + sizeof (zil_chain_t);
aadp = aadbuf;
blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
/*
* Calculate the number of encrypted iovecs we will need.
*/
/* We need at least two iovecs -- one for the AAD, one for the MAC. */
nr_iovecs = 2;
for (; slrp < blkend; slrp += lr_len) {
lr = (lr_t *)slrp;
if (byteswap) {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
} else {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
}
nr_iovecs++;
if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
nr_iovecs++;
}
dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
/*
* Copy the plain zil header over and authenticate everything except
* the checksum that will store our MAC. If we are writing the data
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
slrp = src + sizeof (zil_chain_t);
dlrp = dst + sizeof (zil_chain_t);
/*
* Loop over records again, filling in iovecs.
*/
/* The first iovec will contain the authbuf. */
vec = 1;
for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
/* copy the common lr_t */
bcopy(slrp, dlrp, sizeof (lr_t));
bcopy(slrp, aadp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
/*
* If this is a TX_WRITE record we want to encrypt everything
* except the bp if it exists. If the bp does exist we want to
* authenticate it.
*/
if (txtype == TX_WRITE) {
crypt_len = sizeof (lr_write_t) -
sizeof (lr_t) - sizeof (blkptr_t);
dst_iovecs[vec].iov_base = (char *)dlrp +
sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
vec++;
total_len += crypt_len;
if (lr_len != sizeof (lr_write_t)) {
crypt_len = lr_len - sizeof (lr_write_t);
dst_iovecs[vec].iov_base = (char *)
dlrp + sizeof (lr_write_t);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
}
} else {
crypt_len = lr_len - sizeof (lr_t);
dst_iovecs[vec].iov_base = (char *)dlrp +
sizeof (lr_t);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
}
}
/* The last iovec will contain the MAC. */
ASSERT3U(vec, ==, nr_iovecs - 1);
/* AAD */
dst_iovecs[0].iov_base = aadbuf;
dst_iovecs[0].iov_len = aad_len;
/* MAC */
dst_iovecs[vec].iov_base = 0;
dst_iovecs[vec].iov_len = 0;
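/*
* If only the AAD iovec was populated (vec is still 1), the block
* contained nothing to encrypt; report that via no_crypt.
*/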
*no_crypt = (vec == 1);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0);
}
/*
* Special case handling routine for encrypting / decrypting dnode blocks.
*/
static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
zfs_uio_t *puio, zfs_uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt)
{
uint8_t *aadbuf = zio_buf_alloc(datalen);
uint8_t *src, *dst, *aadp;
dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
iovec_t *dst_iovecs;
uint_t nr_iovecs, crypt_len, vec;
uint_t aad_len = 0, total_len = 0;
uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
aadp = aadbuf;
/*
* Count the number of iovecs we will need to do the encryption by
* counting the number of bonus buffers that need to be encrypted.
*/
/* We need at least two iovecs -- one for the AAD, one for the MAC. */
nr_iovecs = 2;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
/*
* This block may still be byteswapped. However, all of the
* values we use are either uint8_t's (for which byteswapping
* is a noop) or a * != 0 check, which will work regardless
* of whether or not we byteswap.
*/
if (sdnp[i].dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
sdnp[i].dn_bonuslen != 0) {
nr_iovecs++;
}
}
dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
/*
* Iterate through the dnodes again, this time filling in the uios
* we allocated earlier. We also concatenate any data we want to
* authenticate onto aadbuf.
*/
/* The first iovec will contain the authbuf. */
vec = 1;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
sizeof (blkptr_t));
}
/*
* Handle authenticated data. We authenticate everything in
* the dnode that can be brought over when we do a raw send.
* This includes all of the core fields as well as the MACs
* stored in the bp checksums and all of the portable bits
* from blk_prop. We include the dnode padding here in case it
* ever gets used in the future. Some dn_flags and dn_used are
* not portable, so we mask those values out of the
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
bcopy(dnp, aadp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
aadp += crypt_len;
aad_len += crypt_len;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, &dnp->dn_blkptr[j]);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, DN_SPILL_BLKPTR(dnp));
}
/*
* If this bonus buffer needs to be encrypted, we prepare an
* iovec_t. The encryption / decryption functions will fill
* this in for us with the encrypted or decrypted data.
* Otherwise we add the bonus buffer to the authenticated
* data buffer and copy it over to the destination. The
* encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
* we can guarantee alignment with the AES block size
* (128 bits).
*/
crypt_len = DN_MAX_BONUS_LEN(dnp);
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
dst_iovecs[vec].iov_base = DN_BONUS(&ddnp[i]);
dst_iovecs[vec].iov_len = crypt_len;
vec++;
total_len += crypt_len;
} else {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
bcopy(DN_BONUS(dnp), aadp, crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
}
/* The last iovec will contain the MAC. */
ASSERT3U(vec, ==, nr_iovecs - 1);
/* AAD */
dst_iovecs[0].iov_base = aadbuf;
dst_iovecs[0].iov_len = aad_len;
/* MAC */
dst_iovecs[vec].iov_base = 0;
dst_iovecs[vec].iov_len = 0;
*no_crypt = (vec == 1);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
GET_UIO_STRUCT(out_uio)->uio_iov = dst_iovecs;
zfs_uio_iovcnt(out_uio) = nr_iovecs;
return (0);
}
-/* ARGSUSED */
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *out_uio,
uint_t *enc_len)
{
+ (void) puio;
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
void *src, *dst;
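/*
* The ciphertext uio carries two iovecs: [0] the data buffer and [1] a
* placeholder that zio_crypt_init_uios() later points at the MAC.
*/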
cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t),
KM_SLEEP);
if (!cipher_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
bzero(cipher_iovecs, nr_cipher * sizeof (iovec_t));
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
} else {
src = cipherbuf;
dst = plainbuf;
}
bcopy(src, dst, datalen);
cipher_iovecs[0].iov_base = dst;
cipher_iovecs[0].iov_len = datalen;
*enc_len = datalen;
GET_UIO_STRUCT(out_uio)->uio_iov = cipher_iovecs;
zfs_uio_iovcnt(out_uio) = nr_cipher;
return (0);
error:
if (plain_iovecs != NULL)
kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
if (cipher_iovecs != NULL)
kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
*enc_len = 0;
GET_UIO_STRUCT(out_uio)->uio_iov = NULL;
zfs_uio_iovcnt(out_uio) = 0;
return (ret);
}
/*
* This function builds up the plaintext (puio) and ciphertext (cuio) uios so
* that they can be used for encryption and decryption by zio_do_crypt_uio().
* Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
* requiring special handling to parse out pieces that are to be encrypted. The
* authbuf is used by these special cases to store additional authenticated
* data (AAD) for the encryption modes.
*/
static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
iovec_t *mac_iov;
ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);
/* route to handler */
switch (ot) {
case DMU_OT_INTENT_LOG:
ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
no_crypt);
break;
case DMU_OT_DNODE:
ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
auth_len, no_crypt);
break;
default:
ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
datalen, puio, cuio, enc_len);
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
break;
}
if (ret != 0)
goto error;
/* populate the uios */
zfs_uio_segflg(cuio) = UIO_SYSSPACE;
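/*
* Point the last iovec of the ciphertext uio at the caller's MAC buffer
* so the crypto code can write (encrypt) or verify (decrypt) the
* authentication tag in place.
*/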
mac_iov =
((iovec_t *)&(GET_UIO_STRUCT(cuio)->
uio_iov[zfs_uio_iovcnt(cuio) - 1]));
mac_iov->iov_base = (void *)mac;
mac_iov->iov_len = ZIO_DATA_MAC_LEN;
return (0);
error:
return (ret);
}
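/*
* Debugging aids: on a decryption failure, zio_do_crypt_data() stashes
* the offending ciphertext here (see its error path below).
*/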
void *failed_decrypt_buf;
int failed_decrypt_size;
/*
* Primary encryption / decryption entrypoint for zio data.
*/
int
zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
boolean_t *no_crypt)
{
int ret;
boolean_t locked = B_FALSE;
uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len;
zfs_uio_t puio, cuio;
struct uio puio_s, cuio_s;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL;
freebsd_crypt_session_t *tmpl = NULL;
uint8_t *authbuf = NULL;
zfs_uio_init(&puio, &puio_s);
zfs_uio_init(&cuio, &cuio_s);
bzero(GET_UIO_STRUCT(&puio), sizeof (struct uio));
bzero(GET_UIO_STRUCT(&cuio), sizeof (struct uio));
#ifdef FCRYPTO_DEBUG
printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
__FUNCTION__,
encrypt ? "encrypt" : "decrypt",
key, salt, ot, iv, mac, datalen,
byteswap ? "byteswap" : "native_endian", plainbuf,
cipherbuf, no_crypt);
printf("\tkey = {");
for (int i = 0; i < key->zk_current_key.ck_length/8; i++)
printf("%02x ", ((uint8_t *)key->zk_current_key.ck_data)[i]);
printf("}\n");
#endif
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
&authbuf, &auth_len, no_crypt);
if (ret != 0)
return (ret);
/*
* If the needed key is the current one, just use it. Otherwise we
* need to generate a temporary one from the given salt + master key.
* If we are encrypting, we must return a copy of the current salt
* so that it can be stored in the blkptr_t.
*/
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = &key->zk_session;
} else {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
if (ret != 0)
goto error;
tmp_ckey.ck_format = CRYPTO_KEY_RAW;
tmp_ckey.ck_data = enc_keydata;
tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
ckey = &tmp_ckey;
tmpl = NULL;
}
/* perform the encryption / decryption */
ret = zio_do_crypt_uio_opencrypto(encrypt, tmpl, key->zk_crypt,
ckey, iv, enc_len, &cuio, auth_len);
if (ret != 0)
goto error;
if (locked) {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
}
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (0);
error:
if (!encrypt) {
if (failed_decrypt_buf != NULL)
kmem_free(failed_decrypt_buf, failed_decrypt_size);
failed_decrypt_buf = kmem_alloc(datalen, KM_SLEEP);
failed_decrypt_size = datalen;
bcopy(cipherbuf, failed_decrypt_buf, datalen);
}
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (SET_ERROR(ret));
}
/*
* Simple wrapper around zio_do_crypt_data() to work with abd's instead of
* linear buffers.
*/
int
zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
{
int ret;
void *ptmp, *ctmp;
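/*
* Borrow linear views of the ABDs: the source side must be copied in
* (abd_borrow_buf_copy()), while the destination side only needs
* scratch space and is copied back on return (abd_return_buf_copy()).
*/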
if (encrypt) {
ptmp = abd_borrow_buf_copy(pabd, datalen);
ctmp = abd_borrow_buf(cabd, datalen);
} else {
ptmp = abd_borrow_buf(pabd, datalen);
ctmp = abd_borrow_buf_copy(cabd, datalen);
}
ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
datalen, ptmp, ctmp, no_crypt);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (0);
error:
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (SET_ERROR(ret));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
"can be used for generating encryption keys before it is rotated");
/* END CSTYLED */
#endif
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 450369192569..0b347e7f98fc 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -1,1543 +1,1571 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Portions Copyright 2010 Robert Milkowski
*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
/*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/zvol/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot. No user command needs to be
* run before opening and using a device.
*
* On FreeBSD ZVOLs are simply GEOM providers like any other storage device
* in the system. Except when they're simply character devices (volmode=dev).
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/disk.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/queue.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/zil.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/zil_impl.h>
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <sys/zil_impl.h>
#include <sys/filio.h>
#include <geom/geom.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include "zfs_namecheck.h"
#define ZVOL_DUMPSIZE "dumpsize"
#ifdef ZVOL_LOCK_DEBUG
#define ZVOL_RW_READER RW_WRITER
#define ZVOL_RW_READ_HELD RW_WRITE_HELD
#else
#define ZVOL_RW_READER RW_READER
#define ZVOL_RW_READ_HELD RW_READ_HELD
#endif
enum zvol_geom_state {
ZVOL_GEOM_UNINIT,
ZVOL_GEOM_STOPPED,
ZVOL_GEOM_RUNNING,
};
struct zvol_state_os {
#define zso_dev _zso_state._zso_dev
#define zso_geom _zso_state._zso_geom
union {
/* volmode=dev */
struct zvol_state_dev {
struct cdev *zsd_cdev;
uint64_t zsd_sync_cnt;
} _zso_dev;
/* volmode=geom */
struct zvol_state_geom {
struct g_provider *zsg_provider;
struct bio_queue_head zsg_queue;
struct mtx zsg_queue_mtx;
enum zvol_geom_state zsg_state;
} _zso_geom;
} _zso_state;
int zso_dying;
};
static uint32_t zvol_minors;
SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &zvol_volmode, 0,
"Expose as GEOM providers (1), device files (2) or neither");
static boolean_t zpool_on_zvol = B_FALSE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0,
"Allow zpools to use zvols as vdevs (DANGEROUS)");
/*
* Toggle unmap functionality.
*/
boolean_t zvol_unmap_enabled = B_TRUE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, unmap_enabled, CTLFLAG_RWTUN,
&zvol_unmap_enabled, 0, "Enable UNMAP functionality");
/*
* zvol maximum transfer in one DMU tx.
*/
int zvol_maxphys = DMU_MAX_ACCESS / 2;
static void zvol_ensure_zilog(zvol_state_t *zv);
static d_open_t zvol_cdev_open;
static d_close_t zvol_cdev_close;
static d_ioctl_t zvol_cdev_ioctl;
static d_read_t zvol_cdev_read;
static d_write_t zvol_cdev_write;
static d_strategy_t zvol_geom_bio_strategy;
static struct cdevsw zvol_cdevsw = {
.d_name = "zvol",
.d_version = D_VERSION,
.d_flags = D_DISK | D_TRACKCLOSE,
.d_open = zvol_cdev_open,
.d_close = zvol_cdev_close,
.d_ioctl = zvol_cdev_ioctl,
.d_read = zvol_cdev_read,
.d_write = zvol_cdev_write,
.d_strategy = zvol_geom_bio_strategy,
};
extern uint_t zfs_geom_probe_vdev_key;
struct g_class zfs_zvol_class = {
.name = "ZFS::ZVOL",
.version = G_VERSION,
};
DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_worker(void *arg);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
/* static d_strategy_t zvol_geom_bio_strategy; (declared elsewhere) */
/*
* GEOM mode implementation
*/
/*ARGSUSED*/
static int
zvol_geom_open(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
int err = 0;
boolean_t drop_suspend = B_FALSE;
- boolean_t drop_namespace = B_FALSE;
if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
/*
* if zfs_geom_probe_vdev_key is set, that means that zfs is
* attempting to probe geom providers while looking for a
* replacement for a missing VDEV. In this case, the
* spa_namespace_lock will not be held, but it is still illegal
* to use a zvol as a vdev. Deadlocks can result if another
* thread has spa_namespace_lock.
*/
return (SET_ERROR(EOPNOTSUPP));
}
retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
+ /*
+ * Obtain a copy of private under zvol_state_lock to make sure either
+ * the result of zvol free code setting private to NULL is observed,
+ * or the zv is protected from being freed because of the positive
+ * zv_open_count.
+ */
zv = pp->private;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_locked;
}
- if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
- /*
- * We need to guarantee that the namespace lock is held
- * to avoid spurious failures in zvol_first_open.
- */
- drop_namespace = B_TRUE;
- if (!mutex_tryenter(&spa_namespace_lock)) {
- rw_exit(&zvol_state_lock);
- mutex_enter(&spa_namespace_lock);
- goto retry;
- }
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_zv_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
+ boolean_t drop_namespace = B_FALSE;
+
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
+
+ /*
+ * Take spa_namespace_lock to prevent lock inversion when
+ * zvols from one pool are opened as vdevs in another.
+ */
+ if (!mutex_owned(&spa_namespace_lock)) {
+ if (!mutex_tryenter(&spa_namespace_lock)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ kern_yield(PRI_USER);
+ goto retry;
+ } else {
+ drop_namespace = B_TRUE;
+ }
+ }
err = zvol_first_open(zv, !(flag & FWRITE));
+ if (drop_namespace)
+ mutex_exit(&spa_namespace_lock);
if (err)
goto out_zv_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
}
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+
/*
* Check for a bad on-disk format version now since we
* lied about owning the dataset readonly before.
*/
if ((flag & FWRITE) && ((zv->zv_flags & ZVOL_RDONLY) ||
dmu_objset_incompatible_encryption_version(zv->zv_objset))) {
err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
#ifdef FEXCL
if (flag & FEXCL) {
if (zv->zv_open_count != 0) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
#endif
zv->zv_open_count += count;
out_opened:
if (zv->zv_open_count == 0) {
zvol_last_close(zv);
wakeup(zv);
}
out_zv_locked:
mutex_exit(&zv->zv_state_lock);
out_locked:
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
}
/*ARGSUSED*/
static int
zvol_geom_close(struct g_provider *pp, int flag, int count)
{
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
int new_open_count;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = pp->private;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
new_open_count = zv->zv_open_count - count;
if (new_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
new_open_count = zv->zv_open_count - count;
if (new_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* You may get multiple opens, but only one close.
*/
zv->zv_open_count = new_open_count;
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (0);
}
static void
zvol_geom_run(zvol_state_t *zv)
{
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_error_provider(pp, 0);
kproc_kthread_add(zvol_geom_worker, zv, &system_proc, NULL, 0, 0,
"zfskern", "zvol %s", pp->name + sizeof (ZVOL_DRIVER));
}
static void
zvol_geom_destroy(zvol_state_t *zv)
{
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
g_topology_assert();
mutex_enter(&zv->zv_state_lock);
VERIFY3S(zsg->zsg_state, ==, ZVOL_GEOM_RUNNING);
mutex_exit(&zv->zv_state_lock);
zsg->zsg_provider = NULL;
g_wither_geom(pp->geom, ENXIO);
}
void
zvol_wait_close(zvol_state_t *zv)
{
if (zv->zv_volmode != ZFS_VOLMODE_GEOM)
return;
mutex_enter(&zv->zv_state_lock);
zv->zv_zso->zso_dying = B_TRUE;
if (zv->zv_open_count)
msleep(zv, &zv->zv_state_lock,
PRIBIO, "zvol:dying", 10*hz);
mutex_exit(&zv->zv_state_lock);
}
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
int count, error, flags;
g_topology_assert();
/*
* To make it easier we expect either open or close, but not both
* at the same time.
*/
KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
(acr <= 0 && acw <= 0 && ace <= 0),
("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
pp->name, acr, acw, ace));
if (pp->private == NULL) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
return (pp->error);
}
/*
* We don't pass FEXCL flag to zvol_geom_open()/zvol_geom_close() if
* ace != 0, because GEOM already handles that and handles it a bit
* differently. GEOM allows for multiple read/exclusive consumers and
* ZFS allows only one exclusive consumer, no matter if it is reader or
* writer. I like the way GEOM works better, so I'll leave it to GEOM
* to decide what to do.
*/
count = acr + acw + ace;
if (count == 0)
return (0);
flags = 0;
if (acr != 0 || ace != 0)
flags |= FREAD;
if (acw != 0)
flags |= FWRITE;
g_topology_unlock();
if (count > 0)
error = zvol_geom_open(pp, flags, count);
else
error = zvol_geom_close(pp, flags, -count);
g_topology_lock();
return (error);
}
static void
zvol_geom_worker(void *arg)
{
zvol_state_t *zv = arg;
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct bio *bp;
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
thread_lock(curthread);
sched_prio(curthread, PRIBIO);
thread_unlock(curthread);
for (;;) {
mtx_lock(&zsg->zsg_queue_mtx);
bp = bioq_takefirst(&zsg->zsg_queue);
if (bp == NULL) {
if (zsg->zsg_state == ZVOL_GEOM_STOPPED) {
zsg->zsg_state = ZVOL_GEOM_RUNNING;
wakeup(&zsg->zsg_state);
mtx_unlock(&zsg->zsg_queue_mtx);
kthread_exit();
}
msleep(&zsg->zsg_queue, &zsg->zsg_queue_mtx,
PRIBIO | PDROP, "zvol:io", 0);
continue;
}
mtx_unlock(&zsg->zsg_queue_mtx);
zvol_geom_bio_strategy(bp);
}
}
static void
zvol_geom_bio_start(struct bio *bp)
{
zvol_state_t *zv = bp->bio_to->private;
struct zvol_state_geom *zsg;
boolean_t first;
if (zv == NULL) {
g_io_deliver(bp, ENXIO);
return;
}
if (bp->bio_cmd == BIO_GETATTR) {
if (zvol_geom_bio_getattr(bp))
g_io_deliver(bp, EOPNOTSUPP);
return;
}
if (!THREAD_CAN_SLEEP()) {
zsg = &zv->zv_zso->zso_geom;
mtx_lock(&zsg->zsg_queue_mtx);
first = (bioq_first(&zsg->zsg_queue) == NULL);
bioq_insert_tail(&zsg->zsg_queue, bp);
mtx_unlock(&zsg->zsg_queue_mtx);
if (first)
wakeup_one(&zsg->zsg_queue);
return;
}
zvol_geom_bio_strategy(bp);
}
static int
zvol_geom_bio_getattr(struct bio *bp)
{
zvol_state_t *zv;
zv = bp->bio_to->private;
ASSERT3P(zv, !=, NULL);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
uint64_t refd, avail, usedobjs, availobjs;
if (g_handleattr_int(bp, "GEOM::candelete", 1))
return (0);
if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksavail", avail / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksused", refd / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksavail",
avail / DEV_BSIZE))
return (0);
} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksused", refd / DEV_BSIZE))
return (0);
}
return (1);
}
static void
zvol_geom_bio_strategy(struct bio *bp)
{
zvol_state_t *zv;
uint64_t off, volsize;
size_t resid;
char *addr;
objset_t *os;
zfs_locked_range_t *lr;
int error = 0;
boolean_t doread = B_FALSE;
boolean_t is_dumpified;
boolean_t sync;
if (bp->bio_to)
zv = bp->bio_to->private;
else
zv = bp->bio_dev->si_drv2;
if (zv == NULL) {
error = SET_ERROR(ENXIO);
goto out;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
switch (bp->bio_cmd) {
case BIO_READ:
doread = B_TRUE;
break;
case BIO_WRITE:
case BIO_FLUSH:
case BIO_DELETE:
if (zv->zv_flags & ZVOL_RDONLY) {
error = SET_ERROR(EROFS);
goto resume;
}
zvol_ensure_zilog(zv);
if (bp->bio_cmd == BIO_FLUSH)
goto sync;
break;
default:
error = SET_ERROR(EOPNOTSUPP);
goto resume;
}
off = bp->bio_offset;
volsize = zv->zv_volsize;
os = zv->zv_objset;
ASSERT3P(os, !=, NULL);
addr = bp->bio_data;
resid = bp->bio_length;
if (resid > 0 && off >= volsize) {
error = SET_ERROR(EIO);
goto resume;
}
is_dumpified = B_FALSE;
sync = !doread && !is_dumpified &&
zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
/*
* There must be no buffer changes when doing a dmu_sync() because
* we can't change the data whilst calculating the checksum.
*/
lr = zfs_rangelock_enter(&zv->zv_rangelock, off, resid,
doread ? RL_READER : RL_WRITER);
if (bp->bio_cmd == BIO_DELETE) {
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, off, resid, sync);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
off, resid);
resid = 0;
}
goto unlock;
}
while (resid != 0 && off < volsize) {
size_t size = MIN(resid, zvol_maxphys);
if (doread) {
error = dmu_read(os, ZVOL_OBJ, off, size, addr,
DMU_READ_PREFETCH);
} else {
dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
zvol_log_write(zv, tx, off, size, sync);
dmu_tx_commit(tx);
}
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
off += size;
addr += size;
resid -= size;
}
unlock:
zfs_rangelock_exit(lr);
bp->bio_completed = bp->bio_length - resid;
if (bp->bio_completed < bp->bio_length && off > volsize)
error = SET_ERROR(EINVAL);
switch (bp->bio_cmd) {
case BIO_FLUSH:
break;
case BIO_READ:
dataset_kstats_update_read_kstats(&zv->zv_kstat,
bp->bio_completed);
break;
case BIO_WRITE:
dataset_kstats_update_write_kstats(&zv->zv_kstat,
bp->bio_completed);
break;
case BIO_DELETE:
break;
default:
break;
}
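/*
* BIO_FLUSH jumps straight to the "sync" label, bypassing the
* if (sync) test, so a flush always forces a zil_commit().
*/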
if (sync) {
sync:
zil_commit(zv->zv_zilog, ZVOL_OBJ);
}
resume:
rw_exit(&zv->zv_suspend_lock);
out:
if (bp->bio_to)
g_io_deliver(bp, error);
else
biofinish(bp, NULL, error);
}
/*
* Character device mode implementation
*/
static int
zvol_cdev_read(struct cdev *dev, struct uio *uio_s, int ioflag)
{
zvol_state_t *zv;
uint64_t volsize;
zfs_locked_range_t *lr;
int error = 0;
zfs_uio_t uio;
zfs_uio_init(&uio, uio_s);
zv = dev->si_drv2;
volsize = zv->zv_volsize;
/*
* uio_loffset == volsize isn't an error as
* it's required for EOF processing.
*/
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO));
ssize_t start_resid = zfs_uio_resid(&uio);
lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
zfs_uio_resid(&uio), RL_READER);
while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
/* don't read past the end */
if (bytes > volsize - zfs_uio_offset(&uio))
bytes = volsize - zfs_uio_offset(&uio);
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
return (error);
}
static int
zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
{
zvol_state_t *zv;
uint64_t volsize;
zfs_locked_range_t *lr;
int error = 0;
boolean_t sync;
zfs_uio_t uio;
zv = dev->si_drv2;
volsize = zv->zv_volsize;
zfs_uio_init(&uio, uio_s);
if (zfs_uio_resid(&uio) > 0 &&
(zfs_uio_offset(&uio) < 0 || zfs_uio_offset(&uio) > volsize))
return (SET_ERROR(EIO));
ssize_t start_resid = zfs_uio_resid(&uio);
sync = (ioflag & IO_SYNC) ||
(zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, zfs_uio_offset(&uio),
zfs_uio_resid(&uio), RL_WRITER);
while (zfs_uio_resid(&uio) > 0 && zfs_uio_offset(&uio) < volsize) {
uint64_t bytes = MIN(zfs_uio_resid(&uio), DMU_MAX_ACCESS >> 1);
uint64_t off = zfs_uio_offset(&uio);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* don't write past the end */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0)
zvol_log_write(zv, tx, off, bytes, sync);
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - zfs_uio_resid(&uio);
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
return (error);
}
static int
zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
int err = 0;
boolean_t drop_suspend = B_FALSE;
- boolean_t drop_namespace = B_FALSE;
retry:
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
+ /*
+ * Obtain a copy of si_drv2 under zvol_state_lock to make sure either
+ * the result of zvol free code setting si_drv2 to NULL is observed,
+ * or the zv is protected from being freed because of the positive
+ * zv_open_count.
+ */
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
goto out_locked;
}
- if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
- /*
- * We need to guarantee that the namespace lock is held
- * to avoid spurious failures in zvol_first_open.
- */
- drop_namespace = B_TRUE;
- if (!mutex_tryenter(&spa_namespace_lock)) {
- rw_exit(&zvol_state_lock);
- mutex_enter(&spa_namespace_lock);
- goto retry;
- }
- }
mutex_enter(&zv->zv_state_lock);
-
+ if (zv->zv_zso->zso_dying) {
+ rw_exit(&zvol_state_lock);
+ err = SET_ERROR(ENXIO);
+ goto out_zv_locked;
+ }
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* make sure zvol is not suspended during first open
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 0) {
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
if (zv->zv_open_count == 0) {
+ boolean_t drop_namespace = B_FALSE;
+
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
+
+ /*
+ * Take spa_namespace_lock to prevent lock inversion when
+ * zvols from one pool are opened as vdevs in another.
+ */
+ if (!mutex_owned(&spa_namespace_lock)) {
+ if (!mutex_tryenter(&spa_namespace_lock)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ kern_yield(PRI_USER);
+ goto retry;
+ } else {
+ drop_namespace = B_TRUE;
+ }
+ }
err = zvol_first_open(zv, !(flags & FWRITE));
+ if (drop_namespace)
+ mutex_exit(&spa_namespace_lock);
if (err)
goto out_zv_locked;
}
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+
if ((flags & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
err = SET_ERROR(EROFS);
goto out_opened;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
#ifdef FEXCL
if (flags & FEXCL) {
if (zv->zv_open_count != 0) {
err = SET_ERROR(EBUSY);
goto out_opened;
}
zv->zv_flags |= ZVOL_EXCL;
}
#endif
zv->zv_open_count++;
if (flags & (FSYNC | FDSYNC)) {
zsd = &zv->zv_zso->zso_dev;
zsd->zsd_sync_cnt++;
if (zsd->zsd_sync_cnt == 1 &&
(zv->zv_flags & ZVOL_WRITTEN_TO) != 0)
zil_async_to_sync(zv->zv_zilog, ZVOL_OBJ);
}
out_opened:
if (zv->zv_open_count == 0) {
zvol_last_close(zv);
wakeup(zv);
}
out_zv_locked:
mutex_exit(&zv->zv_state_lock);
out_locked:
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
}
static int
zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
zvol_state_t *zv;
struct zvol_state_dev *zsd;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, ZVOL_RW_READER);
zv = dev->si_drv2;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
ASSERT3U(zv->zv_open_count, ==, 1);
zv->zv_flags &= ~ZVOL_EXCL;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
/*
* If the open count is zero, this is a spurious close.
* That indicates a bug in the kernel / DDI framework.
*/
ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* You may get multiple opens, but only one close.
*/
zv->zv_open_count--;
if (flags & (FSYNC | FDSYNC)) {
zsd = &zv->zv_zso->zso_dev;
zsd->zsd_sync_cnt--;
}
if (zv->zv_open_count == 0) {
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
wakeup(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (0);
}
static int
zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
int fflag, struct thread *td)
{
zvol_state_t *zv;
zfs_locked_range_t *lr;
off_t offset, length;
int i, error;
boolean_t sync;
zv = dev->si_drv2;
error = 0;
KASSERT(zv->zv_open_count > 0,
("Device with zero access count in %s", __func__));
i = IOCPARM_LEN(cmd);
switch (cmd) {
case DIOCGSECTORSIZE:
*(uint32_t *)data = DEV_BSIZE;
break;
case DIOCGMEDIASIZE:
*(off_t *)data = zv->zv_volsize;
break;
case DIOCGFLUSH:
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
if (zv->zv_zilog != NULL)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGDELETE:
if (!zvol_unmap_enabled)
break;
offset = ((off_t *)data)[0];
length = ((off_t *)data)[1];
if ((offset % DEV_BSIZE) != 0 || (length % DEV_BSIZE) != 0 ||
offset < 0 || offset >= zv->zv_volsize ||
length <= 0) {
printf("%s: offset=%jd length=%jd\n", __func__, offset,
length);
error = SET_ERROR(EINVAL);
break;
}
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
zvol_ensure_zilog(zv);
lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
RL_WRITER);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
sync = FALSE;
dmu_tx_abort(tx);
} else {
sync = (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
zvol_log_truncate(zv, tx, offset, length, sync);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
offset, length);
}
zfs_rangelock_exit(lr);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
break;
case DIOCGSTRIPESIZE:
*(off_t *)data = zv->zv_volblocksize;
break;
case DIOCGSTRIPEOFFSET:
*(off_t *)data = 0;
break;
case DIOCGATTR: {
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
if (strcmp(arg->name, "GEOM::candelete") == 0)
arg->value.i = 1;
else if (strcmp(arg->name, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = refd / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(
spa_normal_class(spa));
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = refd / DEV_BSIZE;
} else
error = SET_ERROR(ENOIOCTL);
break;
}
case FIOSEEKHOLE:
case FIOSEEKDATA: {
off_t *off = (off_t *)data;
uint64_t noff;
boolean_t hole;
hole = (cmd == FIOSEEKHOLE);
noff = *off;
error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
*off = noff;
break;
}
default:
error = SET_ERROR(ENOIOCTL);
}
return (error);
}
/*
* Misc. helpers
*/
static void
zvol_ensure_zilog(zvol_state_t *zv)
{
ASSERT(ZVOL_RW_READ_HELD(&zv->zv_suspend_lock));
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
if (!rw_tryupgrade(&zv->zv_suspend_lock)) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
}
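/*
* Re-check under the write lock; another thread may have opened the
* ZIL while zv_suspend_lock was dropped above.
*/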
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_create_minor_impl() */
VERIFY0(zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED);
}
rw_downgrade(&zv->zv_suspend_lock);
}
}
static boolean_t
zvol_is_zvol_impl(const char *device)
{
return (device && strncmp(device, ZVOL_DIR, strlen(ZVOL_DIR)) == 0);
}
static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/* move to new hashtable entry */
zv->zv_hash = zvol_name_hash(zv->zv_name);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
struct g_geom *gp;
g_topology_lock();
gp = pp->geom;
ASSERT3P(gp, !=, NULL);
zsg->zsg_provider = NULL;
g_wither_provider(pp, ENXIO);
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = zv->zv_volsize;
pp->private = zv;
zsg->zsg_provider = pp;
g_error_provider(pp, 0);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
dev = zsd->zsd_cdev;
if (dev != NULL) {
destroy_dev(dev);
dev = zsd->zsd_cdev = NULL;
if (zv->zv_open_count > 0) {
zv->zv_flags &= ~ZVOL_EXCL;
zv->zv_open_count = 0;
/* XXX: zv_suspend_lock should be held here, but lock ordering prevents it */
zvol_last_close(zv);
}
}
make_dev_args_init(&args);
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_devsw = &zvol_cdevsw;
args.mda_cr = NULL;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_OPERATOR;
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, newname)
== 0) {
#if __FreeBSD_version > 1300130
dev->si_iosize_max = maxphys;
#else
dev->si_iosize_max = MAXPHYS;
#endif
zsd->zsd_cdev = dev;
}
}
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
}
/*
* Remove minor node for the specified volume.
*/
static void
zvol_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp __maybe_unused = zsg->zsg_provider;
ASSERT3P(pp->private, ==, NULL);
g_topology_lock();
zvol_geom_destroy(zv);
g_topology_unlock();
mtx_destroy(&zsg->zsg_queue_mtx);
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL) {
ASSERT3P(dev->si_drv2, ==, NULL);
destroy_dev(dev);
}
}
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
zvol_minors--;
}
/*
* Create a minor node (plus a whole lot more) for the specified volume.
*/
static int
zvol_create_minor_impl(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t volmode, hash;
int error;
ZFS_LOG(1, "Creating ZVOL %s...", name);
hash = zvol_name_hash(name);
if ((zv = zvol_find_by_name_hash(name, hash, RW_NONE)) != NULL) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(EEXIST));
}
DROP_GIANT();
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
/* lie and say we're read-only */
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_VOLMODE), &volmode, NULL);
if (error || volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
error = 0;
/*
* zvol_alloc equivalent ...
*/
zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
zv->zv_hash = hash;
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
zv->zv_zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_volmode = volmode;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp;
struct g_geom *gp;
zsg->zsg_state = ZVOL_GEOM_UNINIT;
mtx_init(&zsg->zsg_queue_mtx, "zvol", NULL, MTX_DEF);
g_topology_lock();
gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
gp->start = zvol_geom_bio_start;
gp->access = zvol_geom_access;
pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
pp->sectorsize = DEV_BSIZE;
pp->mediasize = 0;
pp->private = zv;
zsg->zsg_provider = pp;
bioq_init(&zsg->zsg_queue);
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev;
struct make_dev_args args;
make_dev_args_init(&args);
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_devsw = &zvol_cdevsw;
args.mda_cr = NULL;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_OPERATOR;
args.mda_mode = 0640;
args.mda_si_drv2 = zv;
if (make_dev_s(&args, &dev, "%s/%s", ZVOL_DRIVER, name)
== 0) {
#if __FreeBSD_version > 1300130
dev->si_iosize_max = maxphys;
#else
dev->si_iosize_max = MAXPHYS;
#endif
zsd->zsd_cdev = dev;
}
}
(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
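/*
* Open the ZIL just long enough to replay (or destroy) any outstanding
* log records, then close it again so that later opens never need to
* replay (see the VERIFY0 in zvol_ensure_zilog()).
*/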
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
zil_destroy(zv->zv_zilog, B_FALSE);
else
zil_replay(os, zv, zvol_replay_vector);
}
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
/* TODO: prefetch for geom tasting */
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
if (error == 0 && volmode == ZFS_VOLMODE_GEOM) {
zvol_geom_run(zv);
g_topology_unlock();
}
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
zvol_minors++;
rw_exit(&zvol_state_lock);
ZFS_LOG(1, "ZVOL %s created.", name);
}
PICKUP_GIANT();
return (error);
}
static void
zvol_clear_private(zvol_state_t *zv)
{
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
if (pp->private == NULL) /* already cleared */
return;
mtx_lock(&zsg->zsg_queue_mtx);
zsg->zsg_state = ZVOL_GEOM_STOPPED;
pp->private = NULL;
wakeup_one(&zsg->zsg_queue);
while (zsg->zsg_state != ZVOL_GEOM_RUNNING)
msleep(&zsg->zsg_state, &zsg->zsg_queue_mtx,
0, "zvol:w", 0);
mtx_unlock(&zsg->zsg_queue_mtx);
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
if (dev != NULL)
dev->si_drv2 = NULL;
}
}
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
zv->zv_volsize = volsize;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
struct g_provider *pp = zsg->zsg_provider;
g_topology_lock();
if (pp->private == NULL) {
g_topology_unlock();
return (SET_ERROR(ENXIO));
}
/*
* Do not invoke resize event when initial size was zero.
* ZVOL initializes the size on first open; this is not
* real resizing.
*/
if (pp->mediasize == 0)
pp->mediasize = zv->zv_volsize;
else
g_resize_provider(pp, zv->zv_volsize);
g_topology_unlock();
}
return (0);
}
static void
zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
{
// XXX? set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
static void
zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
{
// XXX? set_capacity(zv->zv_zso->zvo_disk, capacity);
}
const static zvol_platform_ops_t zvol_freebsd_ops = {
.zv_free = zvol_free,
.zv_rename_minor = zvol_rename_minor,
.zv_create_minor = zvol_create_minor_impl,
.zv_update_volsize = zvol_update_volsize,
.zv_clear_private = zvol_clear_private,
.zv_is_zvol = zvol_is_zvol_impl,
.zv_set_disk_ro = zvol_set_disk_ro_impl,
.zv_set_capacity = zvol_set_capacity_impl,
};
/*
* Public interfaces
*/
int
zvol_busy(void)
{
return (zvol_minors != 0);
}
int
zvol_init(void)
{
zvol_init_impl();
zvol_register_ops(&zvol_freebsd_ops);
return (0);
}
void
zvol_fini(void)
{
zvol_fini_impl();
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
index 0c46708326d8..b5666e78842b 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-kstat.c
@@ -1,715 +1,715 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Kstat Implementation.
*
* Links to Illumos.org for more information on kstat function:
* [1] https://illumos.org/man/1M/kstat
* [2] https://illumos.org/man/9f/kstat_create
*/
#include <linux/seq_file.h>
#include <sys/kstat.h>
#include <sys/vmem.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
static kmutex_t kstat_module_lock;
static struct list_head kstat_module_list;
static kid_t kstat_id;
static int
kstat_resize_raw(kstat_t *ksp)
{
if (ksp->ks_raw_bufsize == KSTAT_RAW_MAX)
return (ENOMEM);
vmem_free(ksp->ks_raw_buf, ksp->ks_raw_bufsize);
ksp->ks_raw_bufsize = MIN(ksp->ks_raw_bufsize * 2, KSTAT_RAW_MAX);
ksp->ks_raw_buf = vmem_alloc(ksp->ks_raw_bufsize, KM_SLEEP);
return (0);
}
static int
kstat_seq_show_headers(struct seq_file *f)
{
kstat_t *ksp = (kstat_t *)f->private;
int rc = 0;
ASSERT(ksp->ks_magic == KS_MAGIC);
seq_printf(f, "%d %d 0x%02x %d %d %lld %lld\n",
ksp->ks_kid, ksp->ks_type, ksp->ks_flags,
ksp->ks_ndata, (int)ksp->ks_data_size,
ksp->ks_crtime, ksp->ks_snaptime);
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
restart:
if (ksp->ks_raw_ops.headers) {
rc = ksp->ks_raw_ops.headers(
ksp->ks_raw_buf, ksp->ks_raw_bufsize);
if (rc == ENOMEM && !kstat_resize_raw(ksp))
goto restart;
if (!rc)
seq_puts(f, ksp->ks_raw_buf);
} else {
seq_printf(f, "raw data\n");
}
break;
case KSTAT_TYPE_NAMED:
seq_printf(f, "%-31s %-4s %s\n",
"name", "type", "data");
break;
case KSTAT_TYPE_INTR:
seq_printf(f, "%-8s %-8s %-8s %-8s %-8s\n",
"hard", "soft", "watchdog",
"spurious", "multsvc");
break;
case KSTAT_TYPE_IO:
seq_printf(f,
"%-8s %-8s %-8s %-8s %-8s %-8s "
"%-8s %-8s %-8s %-8s %-8s %-8s\n",
"nread", "nwritten", "reads", "writes",
"wtime", "wlentime", "wupdate",
"rtime", "rlentime", "rupdate",
"wcnt", "rcnt");
break;
case KSTAT_TYPE_TIMER:
seq_printf(f,
"%-31s %-8s "
"%-8s %-8s %-8s %-8s %-8s\n",
"name", "events", "elapsed",
"min", "max", "start", "stop");
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
return (-rc);
}
static int
kstat_seq_show_raw(struct seq_file *f, unsigned char *p, int l)
{
int i, j;
for (i = 0; ; i++) {
seq_printf(f, "%03x:", i);
for (j = 0; j < 16; j++) {
if (i * 16 + j >= l) {
seq_printf(f, "\n");
goto out;
}
seq_printf(f, " %02x", (unsigned char)p[i * 16 + j]);
}
seq_printf(f, "\n");
}
out:
return (0);
}
static int
kstat_seq_show_named(struct seq_file *f, kstat_named_t *knp)
{
seq_printf(f, "%-31s %-4d ", knp->name, knp->data_type);
switch (knp->data_type) {
case KSTAT_DATA_CHAR:
knp->value.c[15] = '\0'; /* NULL terminate */
seq_printf(f, "%-16s", knp->value.c);
break;
/*
* NOTE - We need to be more careful about what tokens are
* used for each arch; for now this is correct for x86_64.
*/
case KSTAT_DATA_INT32:
seq_printf(f, "%d", knp->value.i32);
break;
case KSTAT_DATA_UINT32:
seq_printf(f, "%u", knp->value.ui32);
break;
case KSTAT_DATA_INT64:
seq_printf(f, "%lld", (signed long long)knp->value.i64);
break;
case KSTAT_DATA_UINT64:
seq_printf(f, "%llu",
(unsigned long long)knp->value.ui64);
break;
case KSTAT_DATA_LONG:
seq_printf(f, "%ld", knp->value.l);
break;
case KSTAT_DATA_ULONG:
seq_printf(f, "%lu", knp->value.ul);
break;
case KSTAT_DATA_STRING:
KSTAT_NAMED_STR_PTR(knp)
[KSTAT_NAMED_STR_BUFLEN(knp)-1] = '\0';
seq_printf(f, "%s", KSTAT_NAMED_STR_PTR(knp));
break;
default:
PANIC("Undefined kstat data type %d\n", knp->data_type);
}
seq_printf(f, "\n");
return (0);
}
static int
kstat_seq_show_intr(struct seq_file *f, kstat_intr_t *kip)
{
seq_printf(f, "%-8u %-8u %-8u %-8u %-8u\n",
kip->intrs[KSTAT_INTR_HARD],
kip->intrs[KSTAT_INTR_SOFT],
kip->intrs[KSTAT_INTR_WATCHDOG],
kip->intrs[KSTAT_INTR_SPURIOUS],
kip->intrs[KSTAT_INTR_MULTSVC]);
return (0);
}
static int
kstat_seq_show_io(struct seq_file *f, kstat_io_t *kip)
{
/* though wlentime & friends are signed, they will never be negative */
seq_printf(f,
"%-8llu %-8llu %-8u %-8u %-8llu %-8llu "
"%-8llu %-8llu %-8llu %-8llu %-8u %-8u\n",
kip->nread, kip->nwritten,
kip->reads, kip->writes,
kip->wtime, kip->wlentime, kip->wlastupdate,
kip->rtime, kip->rlentime, kip->rlastupdate,
kip->wcnt, kip->rcnt);
return (0);
}
static int
kstat_seq_show_timer(struct seq_file *f, kstat_timer_t *ktp)
{
seq_printf(f,
"%-31s %-8llu %-8llu %-8llu %-8llu %-8llu %-8llu\n",
ktp->name, ktp->num_events, ktp->elapsed_time,
ktp->min_time, ktp->max_time,
ktp->start_time, ktp->stop_time);
return (0);
}
static int
kstat_seq_show(struct seq_file *f, void *p)
{
kstat_t *ksp = (kstat_t *)f->private;
int rc = 0;
ASSERT(ksp->ks_magic == KS_MAGIC);
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
restart:
if (ksp->ks_raw_ops.data) {
rc = ksp->ks_raw_ops.data(
ksp->ks_raw_buf, ksp->ks_raw_bufsize, p);
if (rc == ENOMEM && !kstat_resize_raw(ksp))
goto restart;
if (!rc)
seq_puts(f, ksp->ks_raw_buf);
} else {
ASSERT(ksp->ks_ndata == 1);
rc = kstat_seq_show_raw(f, ksp->ks_data,
ksp->ks_data_size);
}
break;
case KSTAT_TYPE_NAMED:
rc = kstat_seq_show_named(f, (kstat_named_t *)p);
break;
case KSTAT_TYPE_INTR:
rc = kstat_seq_show_intr(f, (kstat_intr_t *)p);
break;
case KSTAT_TYPE_IO:
rc = kstat_seq_show_io(f, (kstat_io_t *)p);
break;
case KSTAT_TYPE_TIMER:
rc = kstat_seq_show_timer(f, (kstat_timer_t *)p);
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
return (-rc);
}
static int
kstat_default_update(kstat_t *ksp, int rw)
{
ASSERT(ksp != NULL);
if (rw == KSTAT_WRITE)
return (EACCES);
return (0);
}
static void *
kstat_seq_data_addr(kstat_t *ksp, loff_t n)
{
void *rc = NULL;
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
if (ksp->ks_raw_ops.addr)
rc = ksp->ks_raw_ops.addr(ksp, n);
else
rc = ksp->ks_data;
break;
case KSTAT_TYPE_NAMED:
rc = ksp->ks_data + n * sizeof (kstat_named_t);
break;
case KSTAT_TYPE_INTR:
rc = ksp->ks_data + n * sizeof (kstat_intr_t);
break;
case KSTAT_TYPE_IO:
rc = ksp->ks_data + n * sizeof (kstat_io_t);
break;
case KSTAT_TYPE_TIMER:
rc = ksp->ks_data + n * sizeof (kstat_timer_t);
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
return (rc);
}
static void *
kstat_seq_start(struct seq_file *f, loff_t *pos)
{
loff_t n = *pos;
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
mutex_enter(ksp->ks_lock);
if (ksp->ks_type == KSTAT_TYPE_RAW) {
ksp->ks_raw_bufsize = PAGE_SIZE;
ksp->ks_raw_buf = vmem_alloc(ksp->ks_raw_bufsize, KM_SLEEP);
}
/* Dynamically update the kstat; on error the existing data is used */
(void) ksp->ks_update(ksp, KSTAT_READ);
ksp->ks_snaptime = gethrtime();
if (!(ksp->ks_flags & KSTAT_FLAG_NO_HEADERS) && !n &&
kstat_seq_show_headers(f))
return (NULL);
if (n >= ksp->ks_ndata)
return (NULL);
return (kstat_seq_data_addr(ksp, n));
}
static void *
kstat_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
++*pos;
if (*pos >= ksp->ks_ndata)
return (NULL);
return (kstat_seq_data_addr(ksp, *pos));
}
static void
kstat_seq_stop(struct seq_file *f, void *v)
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
if (ksp->ks_type == KSTAT_TYPE_RAW)
vmem_free(ksp->ks_raw_buf, ksp->ks_raw_bufsize);
mutex_exit(ksp->ks_lock);
}
static struct seq_operations kstat_seq_ops = {
.show = kstat_seq_show,
.start = kstat_seq_start,
.next = kstat_seq_next,
.stop = kstat_seq_stop,
};
static kstat_module_t *
kstat_find_module(char *name)
{
kstat_module_t *module = NULL;
list_for_each_entry(module, &kstat_module_list, ksm_module_list) {
if (strncmp(name, module->ksm_name, KSTAT_STRLEN) == 0)
return (module);
}
return (NULL);
}
static kstat_module_t *
kstat_create_module(char *name)
{
kstat_module_t *module;
struct proc_dir_entry *pde;
pde = proc_mkdir(name, proc_spl_kstat);
if (pde == NULL)
return (NULL);
module = kmem_alloc(sizeof (kstat_module_t), KM_SLEEP);
module->ksm_proc = pde;
strlcpy(module->ksm_name, name, KSTAT_STRLEN+1);
INIT_LIST_HEAD(&module->ksm_kstat_list);
list_add_tail(&module->ksm_module_list, &kstat_module_list);
return (module);
}
static void
kstat_delete_module(kstat_module_t *module)
{
ASSERT(list_empty(&module->ksm_kstat_list));
remove_proc_entry(module->ksm_name, proc_spl_kstat);
list_del(&module->ksm_module_list);
kmem_free(module, sizeof (kstat_module_t));
}
static int
proc_kstat_open(struct inode *inode, struct file *filp)
{
struct seq_file *f;
int rc;
rc = seq_open(filp, &kstat_seq_ops);
if (rc)
return (rc);
f = filp->private_data;
- f->private = PDE_DATA(inode);
+ f->private = SPL_PDE_DATA(inode);
return (0);
}
static ssize_t
proc_kstat_write(struct file *filp, const char __user *buf, size_t len,
loff_t *ppos)
{
struct seq_file *f = filp->private_data;
kstat_t *ksp = f->private;
int rc;
ASSERT(ksp->ks_magic == KS_MAGIC);
mutex_enter(ksp->ks_lock);
rc = ksp->ks_update(ksp, KSTAT_WRITE);
mutex_exit(ksp->ks_lock);
if (rc)
return (-rc);
*ppos += len;
return (len);
}
static const kstat_proc_op_t proc_kstat_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
.proc_open = proc_kstat_open,
.proc_write = proc_kstat_write,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = seq_release,
#else
.open = proc_kstat_open,
.write = proc_kstat_write,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
#endif
};
void
__kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index))
{
ksp->ks_raw_ops.headers = headers;
ksp->ks_raw_ops.data = data;
ksp->ks_raw_ops.addr = addr;
}
EXPORT_SYMBOL(__kstat_set_raw_ops);
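For context, a raw kstat producer wires its callbacks in through __kstat_set_raw_ops(). The sketch below is illustrative only and not part of this diff: the callback names are invented, and the convention of returning ENOMEM when the buffer is too small is inferred from the ENOMEM/kstat_resize_raw() handling in kstat_seq_show() above.

/* Illustrative sketch -- not part of this change. */
static int
example_raw_headers(char *buf, size_t size)
{
	if ((size_t)snprintf(buf, size, "%-16s %-16s\n",
	    "field", "value") >= size)
		return (ENOMEM);	/* ask the caller to grow the buffer */
	return (0);
}

static int
example_raw_data(char *buf, size_t size, void *data)
{
	if ((size_t)snprintf(buf, size, "%-16s %-16llu\n",
	    "example", 42ULL) >= size)
		return (ENOMEM);	/* kstat_seq_show() will resize and retry */
	return (0);
}

/* Typically hooked up right after kstat creation: */
/* __kstat_set_raw_ops(ksp, example_raw_headers, example_raw_data, NULL); */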
void
kstat_proc_entry_init(kstat_proc_entry_t *kpep, const char *module,
const char *name)
{
kpep->kpe_owner = NULL;
kpep->kpe_proc = NULL;
INIT_LIST_HEAD(&kpep->kpe_list);
strncpy(kpep->kpe_module, module, KSTAT_STRLEN);
strncpy(kpep->kpe_name, name, KSTAT_STRLEN);
}
EXPORT_SYMBOL(kstat_proc_entry_init);
kstat_t *
__kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
const char *ks_class, uchar_t ks_type, uint_t ks_ndata,
uchar_t ks_flags)
{
kstat_t *ksp;
ASSERT(ks_module);
ASSERT(ks_instance == 0);
ASSERT(ks_name);
if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
ASSERT(ks_ndata == 1);
ksp = kmem_zalloc(sizeof (*ksp), KM_SLEEP);
if (ksp == NULL)
return (ksp);
mutex_enter(&kstat_module_lock);
ksp->ks_kid = kstat_id;
kstat_id++;
mutex_exit(&kstat_module_lock);
ksp->ks_magic = KS_MAGIC;
mutex_init(&ksp->ks_private_lock, NULL, MUTEX_DEFAULT, NULL);
ksp->ks_lock = &ksp->ks_private_lock;
ksp->ks_crtime = gethrtime();
ksp->ks_snaptime = ksp->ks_crtime;
ksp->ks_instance = ks_instance;
strncpy(ksp->ks_class, ks_class, KSTAT_STRLEN);
ksp->ks_type = ks_type;
ksp->ks_flags = ks_flags;
ksp->ks_update = kstat_default_update;
ksp->ks_private = NULL;
ksp->ks_raw_ops.headers = NULL;
ksp->ks_raw_ops.data = NULL;
ksp->ks_raw_ops.addr = NULL;
ksp->ks_raw_buf = NULL;
ksp->ks_raw_bufsize = 0;
kstat_proc_entry_init(&ksp->ks_proc, ks_module, ks_name);
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
ksp->ks_ndata = 1;
ksp->ks_data_size = ks_ndata;
break;
case KSTAT_TYPE_NAMED:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_named_t);
break;
case KSTAT_TYPE_INTR:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_intr_t);
break;
case KSTAT_TYPE_IO:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_io_t);
break;
case KSTAT_TYPE_TIMER:
ksp->ks_ndata = ks_ndata;
ksp->ks_data_size = ks_ndata * sizeof (kstat_timer_t);
break;
default:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
if (ksp->ks_flags & KSTAT_FLAG_VIRTUAL) {
ksp->ks_data = NULL;
} else {
ksp->ks_data = kmem_zalloc(ksp->ks_data_size, KM_SLEEP);
if (ksp->ks_data == NULL) {
kmem_free(ksp, sizeof (*ksp));
ksp = NULL;
}
}
return (ksp);
}
EXPORT_SYMBOL(__kstat_create);
static int
kstat_detect_collision(kstat_proc_entry_t *kpep)
{
kstat_module_t *module;
kstat_proc_entry_t *tmp = NULL;
char *parent;
char *cp;
parent = kmem_asprintf("%s", kpep->kpe_module);
if ((cp = strrchr(parent, '/')) == NULL) {
kmem_strfree(parent);
return (0);
}
cp[0] = '\0';
if ((module = kstat_find_module(parent)) != NULL) {
list_for_each_entry(tmp, &module->ksm_kstat_list, kpe_list) {
if (strncmp(tmp->kpe_name, cp+1, KSTAT_STRLEN) == 0) {
kmem_strfree(parent);
return (EEXIST);
}
}
}
kmem_strfree(parent);
return (0);
}
/*
* Add a file to the proc filesystem under the kstat namespace (i.e.
* /proc/spl/kstat/). The file need not necessarily be implemented as a
* kstat.
*/
void
kstat_proc_entry_install(kstat_proc_entry_t *kpep, mode_t mode,
const kstat_proc_op_t *proc_ops, void *data)
{
kstat_module_t *module;
kstat_proc_entry_t *tmp = NULL;
ASSERT(kpep);
mutex_enter(&kstat_module_lock);
module = kstat_find_module(kpep->kpe_module);
if (module == NULL) {
if (kstat_detect_collision(kpep) != 0) {
cmn_err(CE_WARN, "kstat_create('%s', '%s'): namespace" \
" collision", kpep->kpe_module, kpep->kpe_name);
goto out;
}
module = kstat_create_module(kpep->kpe_module);
if (module == NULL)
goto out;
}
/*
* Only one entry with this name is allowed per module; on failure the
* module shouldn't be deleted because we know it already has at least
* one entry.
*/
list_for_each_entry(tmp, &module->ksm_kstat_list, kpe_list) {
if (strncmp(tmp->kpe_name, kpep->kpe_name, KSTAT_STRLEN) == 0)
goto out;
}
list_add_tail(&kpep->kpe_list, &module->ksm_kstat_list);
kpep->kpe_owner = module;
kpep->kpe_proc = proc_create_data(kpep->kpe_name, mode,
module->ksm_proc, proc_ops, data);
if (kpep->kpe_proc == NULL) {
list_del_init(&kpep->kpe_list);
if (list_empty(&module->ksm_kstat_list))
kstat_delete_module(module);
}
out:
mutex_exit(&kstat_module_lock);
}
EXPORT_SYMBOL(kstat_proc_entry_install);
void
__kstat_install(kstat_t *ksp)
{
ASSERT(ksp);
mode_t mode;
/* Specify permission modes for different kstats */
if (strncmp(ksp->ks_proc.kpe_name, "dbufs", KSTAT_STRLEN) == 0) {
mode = 0600;
} else {
mode = 0644;
}
kstat_proc_entry_install(
&ksp->ks_proc, mode, &proc_kstat_operations, ksp);
}
EXPORT_SYMBOL(__kstat_install);
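As a usage illustration (not part of this diff), a common OpenZFS pattern for a named kstat is a statically initialized kstat_named_t array published with KSTAT_FLAG_VIRTUAL, so __kstat_delete() will not attempt to free the static data. Callers normally go through kstat_create()/kstat_install()/kstat_delete() wrappers (assumed here); the sketch calls the functions defined in this file directly and every name in it is invented.

/* Illustrative sketch -- not part of this change. */
static kstat_named_t example_stats[] = {
	{ "hits",	KSTAT_DATA_UINT64 },
	{ "misses",	KSTAT_DATA_UINT64 },
};

static kstat_t *example_ksp;

static void
example_kstat_init(void)
{
	example_ksp = __kstat_create("example", 0, "stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (example_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (example_ksp != NULL) {
		/* VIRTUAL: we supply ks_data, so it is never kmem_free()d */
		example_ksp->ks_data = example_stats;
		__kstat_install(example_ksp);
	}
}

static void
example_kstat_fini(void)
{
	if (example_ksp != NULL)
		__kstat_delete(example_ksp);
}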
void
kstat_proc_entry_delete(kstat_proc_entry_t *kpep)
{
kstat_module_t *module = kpep->kpe_owner;
if (kpep->kpe_proc)
remove_proc_entry(kpep->kpe_name, module->ksm_proc);
mutex_enter(&kstat_module_lock);
list_del_init(&kpep->kpe_list);
/*
* Remove the top-level module directory if it wasn't empty before, but
* is now.
*/
if (kpep->kpe_proc && list_empty(&module->ksm_kstat_list))
kstat_delete_module(module);
mutex_exit(&kstat_module_lock);
}
EXPORT_SYMBOL(kstat_proc_entry_delete);
void
__kstat_delete(kstat_t *ksp)
{
kstat_proc_entry_delete(&ksp->ks_proc);
if (!(ksp->ks_flags & KSTAT_FLAG_VIRTUAL))
kmem_free(ksp->ks_data, ksp->ks_data_size);
ksp->ks_lock = NULL;
mutex_destroy(&ksp->ks_private_lock);
kmem_free(ksp, sizeof (*ksp));
}
EXPORT_SYMBOL(__kstat_delete);
int
spl_kstat_init(void)
{
mutex_init(&kstat_module_lock, NULL, MUTEX_DEFAULT, NULL);
INIT_LIST_HEAD(&kstat_module_list);
kstat_id = 0;
return (0);
}
void
spl_kstat_fini(void)
{
ASSERT(list_empty(&kstat_module_list));
mutex_destroy(&kstat_module_lock);
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-procfs-list.c b/sys/contrib/openzfs/module/os/linux/spl/spl-procfs-list.c
index cae13228c62c..2cba1a3022e0 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-procfs-list.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-procfs-list.c
@@ -1,284 +1,284 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 by Delphix. All rights reserved.
*/
#include <sys/list.h>
#include <sys/mutex.h>
#include <sys/procfs_list.h>
#include <linux/proc_fs.h>
/*
* A procfs_list is a wrapper around a linked list which implements the seq_file
* interface, allowing the contents of the list to be exposed through procfs.
* The kernel already has some utilities to help implement the seq_file
* interface for linked lists (seq_list_*), but they aren't appropriate for use
* with lists that have many entries, because seq_list_start walks the list at
* the start of each read syscall to find where it left off, so reading a file
* ends up being quadratic in the number of entries in the list.
*
* This implementation avoids this penalty by maintaining a separate cursor into
* the list per instance of the file that is open. It also maintains some extra
* information in each node of the list to prevent reads of entries that have
* been dropped from the list.
*
* Callers should only add elements to the list using procfs_list_add, which
* adds an element to the tail of the list. Other operations can be performed
* directly on the wrapped list using the normal list manipulation functions,
* but elements should only be removed from the head of the list.
*/
#define NODE_ID(procfs_list, obj) \
(((procfs_list_node_t *)(((char *)obj) + \
(procfs_list)->pl_node_offset))->pln_id)
typedef struct procfs_list_cursor {
procfs_list_t *procfs_list; /* List into which this cursor points */
void *cached_node; /* Most recently accessed node */
loff_t cached_pos; /* Position of cached_node */
} procfs_list_cursor_t;
static int
procfs_list_seq_show(struct seq_file *f, void *p)
{
procfs_list_cursor_t *cursor = f->private;
procfs_list_t *procfs_list = cursor->procfs_list;
ASSERT(MUTEX_HELD(&procfs_list->pl_lock));
if (p == SEQ_START_TOKEN) {
if (procfs_list->pl_show_header != NULL)
return (procfs_list->pl_show_header(f));
else
return (0);
}
return (procfs_list->pl_show(f, p));
}
static void *
procfs_list_next_node(procfs_list_cursor_t *cursor, loff_t *pos)
{
void *next_node;
procfs_list_t *procfs_list = cursor->procfs_list;
if (cursor->cached_node == SEQ_START_TOKEN)
next_node = list_head(&procfs_list->pl_list);
else
next_node = list_next(&procfs_list->pl_list,
cursor->cached_node);
if (next_node != NULL) {
cursor->cached_node = next_node;
cursor->cached_pos = NODE_ID(procfs_list, cursor->cached_node);
*pos = cursor->cached_pos;
} else {
/*
* seq_read() expects ->next() to update the position even
* when there are no more entries. Advance the position to
* prevent a warning from being logged.
*/
cursor->cached_node = NULL;
cursor->cached_pos++;
*pos = cursor->cached_pos;
}
return (next_node);
}
static void *
procfs_list_seq_start(struct seq_file *f, loff_t *pos)
{
procfs_list_cursor_t *cursor = f->private;
procfs_list_t *procfs_list = cursor->procfs_list;
mutex_enter(&procfs_list->pl_lock);
if (*pos == 0) {
cursor->cached_node = SEQ_START_TOKEN;
cursor->cached_pos = 0;
return (SEQ_START_TOKEN);
} else if (cursor->cached_node == NULL) {
return (NULL);
}
/*
* Check if our cached pointer has become stale, which happens if the
* message where we left off has been dropped from the list since
* the last read syscall completed.
*/
void *oldest_node = list_head(&procfs_list->pl_list);
if (cursor->cached_node != SEQ_START_TOKEN && (oldest_node == NULL ||
NODE_ID(procfs_list, oldest_node) > cursor->cached_pos))
return (ERR_PTR(-EIO));
/*
* If it isn't starting from the beginning of the file, the seq_file
* code will either pick up at the same position it visited last or the
* following one.
*/
if (*pos == cursor->cached_pos) {
return (cursor->cached_node);
} else {
ASSERT3U(*pos, ==, cursor->cached_pos + 1);
return (procfs_list_next_node(cursor, pos));
}
}
static void *
procfs_list_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
procfs_list_cursor_t *cursor = f->private;
ASSERT(MUTEX_HELD(&cursor->procfs_list->pl_lock));
return (procfs_list_next_node(cursor, pos));
}
static void
procfs_list_seq_stop(struct seq_file *f, void *p)
{
procfs_list_cursor_t *cursor = f->private;
procfs_list_t *procfs_list = cursor->procfs_list;
mutex_exit(&procfs_list->pl_lock);
}
static struct seq_operations procfs_list_seq_ops = {
.show = procfs_list_seq_show,
.start = procfs_list_seq_start,
.next = procfs_list_seq_next,
.stop = procfs_list_seq_stop,
};
static int
procfs_list_open(struct inode *inode, struct file *filp)
{
int rc = seq_open_private(filp, &procfs_list_seq_ops,
sizeof (procfs_list_cursor_t));
if (rc != 0)
return (rc);
struct seq_file *f = filp->private_data;
procfs_list_cursor_t *cursor = f->private;
- cursor->procfs_list = PDE_DATA(inode);
+ cursor->procfs_list = SPL_PDE_DATA(inode);
cursor->cached_node = NULL;
cursor->cached_pos = 0;
return (0);
}
static ssize_t
procfs_list_write(struct file *filp, const char __user *buf, size_t len,
loff_t *ppos)
{
struct seq_file *f = filp->private_data;
procfs_list_cursor_t *cursor = f->private;
procfs_list_t *procfs_list = cursor->procfs_list;
int rc;
if (procfs_list->pl_clear != NULL &&
(rc = procfs_list->pl_clear(procfs_list)) != 0)
return (-rc);
return (len);
}
static const kstat_proc_op_t procfs_list_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
.proc_open = procfs_list_open,
.proc_write = procfs_list_write,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = seq_release_private,
#else
.open = procfs_list_open,
.write = procfs_list_write,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
#endif
};
/*
* Initialize a procfs_list and create a file for it in the proc filesystem
* under the kstat namespace.
*/
void
procfs_list_install(const char *module,
const char *submodule,
const char *name,
mode_t mode,
procfs_list_t *procfs_list,
int (*show)(struct seq_file *f, void *p),
int (*show_header)(struct seq_file *f),
int (*clear)(procfs_list_t *procfs_list),
size_t procfs_list_node_off)
{
char *modulestr;
if (submodule != NULL)
modulestr = kmem_asprintf("%s/%s", module, submodule);
else
modulestr = kmem_asprintf("%s", module);
mutex_init(&procfs_list->pl_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&procfs_list->pl_list,
procfs_list_node_off + sizeof (procfs_list_node_t),
procfs_list_node_off + offsetof(procfs_list_node_t, pln_link));
procfs_list->pl_next_id = 1; /* Save id 0 for SEQ_START_TOKEN */
procfs_list->pl_show = show;
procfs_list->pl_show_header = show_header;
procfs_list->pl_clear = clear;
procfs_list->pl_node_offset = procfs_list_node_off;
kstat_proc_entry_init(&procfs_list->pl_kstat_entry, modulestr, name);
kstat_proc_entry_install(&procfs_list->pl_kstat_entry, mode,
&procfs_list_operations, procfs_list);
kmem_strfree(modulestr);
}
EXPORT_SYMBOL(procfs_list_install);
/* Remove the proc filesystem file corresponding to the given list */
void
procfs_list_uninstall(procfs_list_t *procfs_list)
{
kstat_proc_entry_delete(&procfs_list->pl_kstat_entry);
}
EXPORT_SYMBOL(procfs_list_uninstall);
void
procfs_list_destroy(procfs_list_t *procfs_list)
{
ASSERT(list_is_empty(&procfs_list->pl_list));
list_destroy(&procfs_list->pl_list);
mutex_destroy(&procfs_list->pl_lock);
}
EXPORT_SYMBOL(procfs_list_destroy);
/*
* Add a new node to the tail of the list. While the standard list manipulation
* functions can be used for all other operations, adding elements to the list
* should only be done using this helper so that the id of the new node is set
* correctly.
*/
void
procfs_list_add(procfs_list_t *procfs_list, void *p)
{
ASSERT(MUTEX_HELD(&procfs_list->pl_lock));
NODE_ID(procfs_list, p) = procfs_list->pl_next_id++;
list_insert_tail(&procfs_list->pl_list, p);
}
EXPORT_SYMBOL(procfs_list_add);
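To make the cursor machinery above concrete, here is an illustrative producer-side sketch, not part of this diff. The struct and callback names are invented; the signatures, the pl_lock requirement, and the /proc/spl/kstat/<module>/<submodule>/<name> location follow from the code above.

/* Illustrative sketch -- not part of this change. */
typedef struct example_entry {
	procfs_list_node_t	ee_node;	/* required embedded node */
	uint64_t		ee_value;
} example_entry_t;

static int
example_show(struct seq_file *f, void *p)
{
	example_entry_t *ee = p;

	seq_printf(f, "%llu\n", (unsigned long long)ee->ee_value);
	return (0);
}

static int
example_show_header(struct seq_file *f)
{
	seq_printf(f, "value\n");
	return (0);
}

static void
example_install_and_add(procfs_list_t *pl, example_entry_t *ee)
{
	/* Creates /proc/spl/kstat/example/sub/list */
	procfs_list_install("example", "sub", "list", 0644, pl,
	    example_show, example_show_header, NULL,
	    offsetof(example_entry_t, ee_node));

	/* procfs_list_add() must be called with pl_lock held */
	mutex_enter(&pl->pl_lock);
	procfs_list_add(pl, ee);
	mutex_exit(&pl->pl_lock);
}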
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c b/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
index 61631256c858..fb25a4154485 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-taskq.c
@@ -1,1428 +1,1429 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Task Queue Implementation.
*/
#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
"Allow non-default priority for taskq threads");
int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
"Create new taskq threads after N sequential tasks");
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);
/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);
#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif
/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;
static int
task_km_flags(uint_t flags)
{
if (flags & TQ_NOSLEEP)
return (KM_NOSLEEP);
if (flags & TQ_PUSHPAGE)
return (KM_PUSHPAGE);
return (KM_SLEEP);
}
/*
* taskq_find_by_name - Find the largest instance number of a named taskq.
*/
static int
taskq_find_by_name(const char *name)
{
struct list_head *tql = NULL;
taskq_t *tq;
list_for_each_prev(tql, &tq_list) {
tq = list_entry(tql, taskq_t, tq_taskqs);
if (strcmp(name, tq->tq_name) == 0)
return (tq->tq_instance);
}
return (-1);
}
/*
* NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
* is not attached to the free, work, or pending taskq lists.
*/
static taskq_ent_t *
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
{
taskq_ent_t *t;
int count = 0;
ASSERT(tq);
retry:
/* Acquire taskq_ent_t's from free list if available */
if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
return (t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
return (NULL);
/* Hit maximum taskq_ent_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
return (NULL);
/*
* Sleep periodically polling the free list for an available
* taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
* but we cannot block forever waiting for a taskq_ent_t to
* show up in the free list, otherwise a deadlock can happen.
*
* Therefore, we need to allocate a new task even if the number
* of allocated tasks is above tq->tq_maxalloc, but we still
* end up delaying the task allocation by one second, thereby
* throttling the task dispatch rate.
*/
spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
schedule_timeout(HZ / 100);
spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
tq->tq_lock_class);
if (count < 100) {
count++;
goto retry;
}
}
spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
if (t) {
taskq_init_ent(t);
tq->tq_nalloc++;
}
return (t);
}
/*
* NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
* to already be removed from the free, work, or pending taskq lists.
*/
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
ASSERT(list_empty(&t->tqent_list));
ASSERT(!timer_pending(&t->tqent_timer));
kmem_free(t, sizeof (taskq_ent_t));
tq->tq_nalloc--;
}
/*
* NOTE: Must be called with tq->tq_lock held, either destroys the
* taskq_ent_t if too many exist or moves it to the free list for later use.
*/
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
ASSERT(tq);
ASSERT(t);
/* Wake tasks blocked in taskq_wait_id() */
wake_up_all(&t->tqent_waitq);
list_del_init(&t->tqent_list);
if (tq->tq_nalloc <= tq->tq_minalloc) {
t->tqent_id = TASKQID_INVALID;
t->tqent_func = NULL;
t->tqent_arg = NULL;
t->tqent_flags = 0;
list_add_tail(&t->tqent_list, &tq->tq_free_list);
} else {
task_free(tq, t);
}
}
/*
* When a delayed task timer expires, remove it from the delay list and
* add it to the priority list for immediate processing.
*/
static void
task_expire_impl(taskq_ent_t *t)
{
taskq_ent_t *w;
taskq_t *tq = t->tqent_taskq;
struct list_head *l = NULL;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
if (t->tqent_flags & TQENT_FLAG_CANCEL) {
ASSERT(list_empty(&t->tqent_list));
spin_unlock_irqrestore(&tq->tq_lock, flags);
return;
}
t->tqent_birth = jiffies;
DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);
/*
* The priority list must be maintained in strict task id order
* from lowest to highest for lowest_id to be easily calculable.
*/
list_del(&t->tqent_list);
list_for_each_prev(l, &tq->tq_prio_list) {
w = list_entry(l, taskq_ent_t, tqent_list);
if (w->tqent_id < t->tqent_id) {
list_add(&t->tqent_list, l);
break;
}
}
if (l == &tq->tq_prio_list)
list_add(&t->tqent_list, &tq->tq_prio_list);
spin_unlock_irqrestore(&tq->tq_lock, flags);
wake_up(&tq->tq_work_waitq);
}
static void
task_expire(spl_timer_list_t tl)
{
struct timer_list *tmr = (struct timer_list *)tl;
taskq_ent_t *t = from_timer(t, tmr, tqent_timer);
task_expire_impl(t);
}
/*
* Returns the lowest incomplete taskqid_t. The taskqid_t may
* be queued on the pending list, on the priority list, on the
* delay list, or on the work list currently being handled, but
* it is not 100% complete yet.
*/
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
taskqid_t lowest_id = tq->tq_next_id;
taskq_ent_t *t;
taskq_thread_t *tqt;
if (!list_empty(&tq->tq_pend_list)) {
t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
lowest_id = MIN(lowest_id, t->tqent_id);
}
if (!list_empty(&tq->tq_prio_list)) {
t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
lowest_id = MIN(lowest_id, t->tqent_id);
}
if (!list_empty(&tq->tq_delay_list)) {
t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
lowest_id = MIN(lowest_id, t->tqent_id);
}
if (!list_empty(&tq->tq_active_list)) {
tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
tqt_active_list);
ASSERT(tqt->tqt_id != TASKQID_INVALID);
lowest_id = MIN(lowest_id, tqt->tqt_id);
}
return (lowest_id);
}
/*
* Insert a task into a list keeping the list sorted by increasing taskqid.
*/
static void
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
{
taskq_thread_t *w;
struct list_head *l = NULL;
ASSERT(tq);
ASSERT(tqt);
list_for_each_prev(l, &tq->tq_active_list) {
w = list_entry(l, taskq_thread_t, tqt_active_list);
if (w->tqt_id < tqt->tqt_id) {
list_add(&tqt->tqt_active_list, l);
break;
}
}
if (l == &tq->tq_active_list)
list_add(&tqt->tqt_active_list, &tq->tq_active_list);
}
/*
* Find and return a task from the given list if it exists. The list
* must be in lowest to highest task id order.
*/
static taskq_ent_t *
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
{
struct list_head *l = NULL;
taskq_ent_t *t;
list_for_each(l, lh) {
t = list_entry(l, taskq_ent_t, tqent_list);
if (t->tqent_id == id)
return (t);
if (t->tqent_id > id)
break;
}
return (NULL);
}
/*
* Find an already dispatched task given the task id regardless of what
* state it is in. If a task is still pending it will be returned.
* If a task is executing, then -EBUSY will be returned instead.
* If the task has already been run then NULL is returned.
*/
static taskq_ent_t *
taskq_find(taskq_t *tq, taskqid_t id)
{
taskq_thread_t *tqt;
struct list_head *l = NULL;
taskq_ent_t *t;
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
return (t);
t = taskq_find_list(tq, &tq->tq_prio_list, id);
if (t)
return (t);
t = taskq_find_list(tq, &tq->tq_pend_list, id);
if (t)
return (t);
list_for_each(l, &tq->tq_active_list) {
tqt = list_entry(l, taskq_thread_t, tqt_active_list);
if (tqt->tqt_id == id) {
/*
* Instead of returning tqt_task, we just return a non
* NULL value to prevent misuse, since tqt_task only
* has two valid fields.
*/
return (ERR_PTR(-EBUSY));
}
}
return (NULL);
}
/*
* Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
* taskq_wait() functions below.
*
* Taskq waiting is accomplished by tracking the lowest outstanding task
* id and the next available task id. As tasks are dispatched they are
* added to the tail of the pending, priority, or delay lists. As worker
* threads become available the tasks are removed from the heads of these
* lists and linked to the worker threads. This ensures the lists are
* kept sorted by lowest to highest task id.
*
* Therefore the lowest outstanding task id can be quickly determined by
* checking the head item from all of these lists. This value is stored
* with the taskq as the lowest id. It only needs to be recalculated when
* either the task with the current lowest id completes or is canceled.
*
* By blocking until the lowest task id exceeds the passed task id the
* taskq_wait_outstanding() function can be easily implemented. Similarly,
* by blocking until the lowest task id matches the next task id taskq_wait()
* can be implemented.
*
* Callers should be aware that when there are multiple worker threads it
* is possible for larger task ids to complete before smaller ones. Also
* when the taskq contains delay tasks with small task ids callers may
* block for a considerable length of time waiting for them to expire and
* execute.
*/
static int
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
int rc;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
rc = (taskq_find(tq, id) == NULL);
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (rc);
}
/*
* The taskq_wait_id() function blocks until the passed task id completes.
* This does not guarantee that all lower task ids have completed.
*/
void
taskq_wait_id(taskq_t *tq, taskqid_t id)
{
wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_id);
static int
taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
int rc;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (rc);
}
/*
* The taskq_wait_outstanding() function will block until all tasks with a
* lower taskqid than the passed 'id' have been completed. Note that all
* task ids are assigned monotonically at dispatch time. Zero may be
* passed for the id to indicate that all tasks dispatched up to this point,
* but not after, should be waited for.
*/
void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
id = id ? id : tq->tq_next_id - 1;
wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
}
EXPORT_SYMBOL(taskq_wait_outstanding);
static int
taskq_wait_check(taskq_t *tq)
{
int rc;
unsigned long flags;
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
rc = (tq->tq_lowest_id == tq->tq_next_id);
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (rc);
}
/*
* The taskq_wait() function will block until the taskq is empty.
* This means that if a taskq re-dispatches work to itself taskq_wait()
* callers will block indefinitely.
*/
void
taskq_wait(taskq_t *tq)
{
wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
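An illustrative sketch, not part of this diff, of how the wait primitives described in the theory comment above are typically combined; the function names are invented.

/* Illustrative sketch -- not part of this change. */
static void
example_task(void *arg)
{
	/* ... do work ... */
}

static void
example_dispatch_and_wait(taskq_t *tq)
{
	taskqid_t id;

	id = taskq_dispatch(tq, example_task, NULL, TQ_SLEEP);
	if (id != TASKQID_INVALID)
		taskq_wait_id(tq, id);	/* wait for this task only */

	/* Wait for everything dispatched so far (id 0 means "all"). */
	taskq_wait_outstanding(tq, 0);

	/* Wait until the taskq is completely empty. */
	taskq_wait(tq);
}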
int
taskq_member(taskq_t *tq, kthread_t *t)
{
return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
}
EXPORT_SYMBOL(taskq_member);
taskq_t *
taskq_of_curthread(void)
{
return (tsd_get(taskq_tsd));
}
EXPORT_SYMBOL(taskq_of_curthread);
/*
* Cancel an already dispatched task given the task id. Still pending tasks
* will be immediately canceled, and if the task is active the function will
* block until it completes. Preallocated tasks which are canceled must be
* freed by the caller.
*/
int
taskq_cancel_id(taskq_t *tq, taskqid_t id)
{
taskq_ent_t *t;
int rc = ENOENT;
unsigned long flags;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
t = taskq_find(tq, id);
if (t && t != ERR_PTR(-EBUSY)) {
list_del_init(&t->tqent_list);
t->tqent_flags |= TQENT_FLAG_CANCEL;
/*
* When canceling the lowest outstanding task id we
* must recalculate the new lowest outstanding id.
*/
if (tq->tq_lowest_id == t->tqent_id) {
tq->tq_lowest_id = taskq_lowest_id(tq);
ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
}
/*
* The task_expire() function takes the tq->tq_lock so drop
* the lock before synchronously cancelling the timer.
*/
if (timer_pending(&t->tqent_timer)) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
del_timer_sync(&t->tqent_timer);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
}
if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
task_done(tq, t);
rc = 0;
}
spin_unlock_irqrestore(&tq->tq_lock, flags);
if (t == ERR_PTR(-EBUSY)) {
taskq_wait_id(tq, id);
rc = EBUSY;
}
return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
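A short illustrative sketch, not part of this diff, of interpreting the taskq_cancel_id() return values established above; the wrapper name is invented.

/* Illustrative sketch -- not part of this change. */
static void
example_cancel(taskq_t *tq, taskqid_t id)
{
	int error = taskq_cancel_id(tq, id);

	switch (error) {
	case 0:		/* Pending task was removed before it ran. */
		break;
	case EBUSY:	/* Task was running; the call blocked until it finished. */
		break;
	case ENOENT:	/* Task already completed, or id was never dispatched. */
		break;
	}
}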
static int taskq_thread_spawn(taskq_t *tq);
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
taskq_ent_t *t;
taskqid_t rc = TASKQID_INVALID;
unsigned long irqflags;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
/* Do not queue the task unless there is an idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
/* Dynamic taskq may be able to spawn another thread */
if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
taskq_thread_spawn(tq) == 0)
goto out;
}
if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
goto out;
spin_lock(&t->tqent_lock);
/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
if (flags & TQ_NOQUEUE)
list_add(&t->tqent_list, &tq->tq_prio_list);
/* Queue to the priority list instead of the pending list */
else if (flags & TQ_FRONT)
list_add_tail(&t->tqent_list, &tq->tq_prio_list);
else
list_add_tail(&t->tqent_list, &tq->tq_pend_list);
t->tqent_id = rc = tq->tq_next_id;
tq->tq_next_id++;
t->tqent_func = func;
t->tqent_arg = arg;
t->tqent_taskq = tq;
t->tqent_timer.function = NULL;
t->tqent_timer.expires = 0;
t->tqent_birth = jiffies;
DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
spin_unlock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
/* Spawn additional taskq threads if required. */
if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
uint_t flags, clock_t expire_time)
{
taskqid_t rc = TASKQID_INVALID;
taskq_ent_t *t;
unsigned long irqflags;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
goto out;
spin_lock(&t->tqent_lock);
/* Queue to the delay list for subsequent execution */
list_add_tail(&t->tqent_list, &tq->tq_delay_list);
t->tqent_id = rc = tq->tq_next_id;
tq->tq_next_id++;
t->tqent_func = func;
t->tqent_arg = arg;
t->tqent_taskq = tq;
t->tqent_timer.function = task_expire;
t->tqent_timer.expires = (unsigned long)expire_time;
add_timer(&t->tqent_timer);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
spin_unlock(&t->tqent_lock);
out:
/* Spawn additional taskq threads if required. */
if (tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
taskq_ent_t *t)
{
unsigned long irqflags;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE)) {
t->tqent_id = TASKQID_INVALID;
goto out;
}
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
/* Dynamic taskq may be able to spawn another thread */
if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
taskq_thread_spawn(tq) == 0)
goto out2;
flags |= TQ_FRONT;
}
spin_lock(&t->tqent_lock);
/*
* Make sure the entry is not on some other taskq; it is important to
* ASSERT() under lock
*/
ASSERT(taskq_empty_ent(t));
/*
* Mark it as a prealloc'd task. This is important
* to ensure that we don't free it later.
*/
t->tqent_flags |= TQENT_FLAG_PREALLOC;
/* Queue to the priority list instead of the pending list */
if (flags & TQ_FRONT)
list_add_tail(&t->tqent_list, &tq->tq_prio_list);
else
list_add_tail(&t->tqent_list, &tq->tq_pend_list);
t->tqent_id = tq->tq_next_id;
tq->tq_next_id++;
t->tqent_func = func;
t->tqent_arg = arg;
t->tqent_taskq = tq;
t->tqent_birth = jiffies;
DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);
spin_unlock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
/* Spawn additional taskq threads if required. */
if (tq->tq_nactive == tq->tq_nthreads)
(void) taskq_thread_spawn(tq);
out2:
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);
int
taskq_empty_ent(taskq_ent_t *t)
{
return (list_empty(&t->tqent_list));
}
EXPORT_SYMBOL(taskq_empty_ent);
void
taskq_init_ent(taskq_ent_t *t)
{
spin_lock_init(&t->tqent_lock);
init_waitqueue_head(&t->tqent_waitq);
timer_setup(&t->tqent_timer, NULL, 0);
INIT_LIST_HEAD(&t->tqent_list);
t->tqent_id = 0;
t->tqent_func = NULL;
t->tqent_arg = NULL;
t->tqent_flags = 0;
t->tqent_taskq = NULL;
}
EXPORT_SYMBOL(taskq_init_ent);
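An illustrative sketch, not part of this diff, of the preallocated-entry path: the caller embeds a taskq_ent_t in its own structure, initializes it with taskq_init_ent(), and dispatches with taskq_dispatch_ent(), so no task_alloc()/task_free() occurs and the entry is never freed by the taskq (TQENT_FLAG_PREALLOC). All names below are invented.

/* Illustrative sketch -- not part of this change. */
typedef struct example_work {
	taskq_ent_t	ew_tqent;	/* preallocated taskq entry */
	int		ew_data;
} example_work_t;

static void
example_work_func(void *arg)
{
	example_work_t *ew = arg;

	/* ... process ew->ew_data ... */
}

static void
example_dispatch_prealloc(taskq_t *tq, example_work_t *ew)
{
	taskq_init_ent(&ew->ew_tqent);

	/*
	 * No allocation happens here; if the taskq is inactive the entry's
	 * tqent_id is set to TASKQID_INVALID (see taskq_dispatch_ent()).
	 */
	taskq_dispatch_ent(tq, example_work_func, ew, TQ_SLEEP, &ew->ew_tqent);
}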
/*
* Return the next pending task, preference is given to tasks on the
* priority list which were dispatched with TQ_FRONT.
*/
static taskq_ent_t *
taskq_next_ent(taskq_t *tq)
{
struct list_head *list;
if (!list_empty(&tq->tq_prio_list))
list = &tq->tq_prio_list;
else if (!list_empty(&tq->tq_pend_list))
list = &tq->tq_pend_list;
else
return (NULL);
return (list_entry(list->next, taskq_ent_t, tqent_list));
}
/*
* Spawns a new thread for the specified taskq.
*/
static void
taskq_thread_spawn_task(void *arg)
{
taskq_t *tq = (taskq_t *)arg;
unsigned long flags;
if (taskq_thread_create(tq) == NULL) {
/* restore spawning count if failed */
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
tq->tq_nspawn--;
spin_unlock_irqrestore(&tq->tq_lock, flags);
}
}
/*
* Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
* current number of threads is insufficient to handle the pending tasks. These
* new threads must be created by the dedicated dynamic_taskq to avoid
* deadlocks between thread creation and memory reclaim. The system_taskq
* which is also a dynamic taskq cannot be safely used for this.
*/
static int
taskq_thread_spawn(taskq_t *tq)
{
int spawning = 0;
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);
if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
(tq->tq_flags & TASKQ_ACTIVE)) {
spawning = (++tq->tq_nspawn);
taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
tq, TQ_NOSLEEP);
}
return (spawning);
}
/*
* Threads in a dynamic taskq should only exit once it has been completely
* drained and no other threads are actively servicing tasks. This prevents
* threads from being created and destroyed more than is required.
*
* The first thread in the thread list is treated as the primary thread.
* There is nothing special about the primary thread but in order to avoid
* all the taskq pids from changing we opt to make it long running.
*/
static int
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
{
if (!(tq->tq_flags & TASKQ_DYNAMIC))
return (0);
if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
tqt_thread_list) == tqt)
return (0);
return
((tq->tq_nspawn == 0) && /* No threads are being spawned */
(tq->tq_nactive == 0) && /* No threads are handling tasks */
(tq->tq_nthreads > 1) && /* More than 1 thread is running */
(!taskq_next_ent(tq)) && /* There are no pending tasks */
(spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
}
static int
taskq_thread(void *args)
{
DECLARE_WAITQUEUE(wait, current);
sigset_t blocked;
taskq_thread_t *tqt = args;
taskq_t *tq;
taskq_ent_t *t;
int seq_tasks = 0;
unsigned long flags;
taskq_ent_t dup_task = {};
ASSERT(tqt);
ASSERT(tqt->tqt_tq);
tq = tqt->tqt_tq;
current->flags |= PF_NOFREEZE;
(void) spl_fstrans_mark();
sigfillset(&blocked);
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);
tsd_set(taskq_tsd, tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
/*
* If we are dynamically spawned, decrease spawning count. Note that
* we could be created during taskq_create, in which case we shouldn't
* do the decrement. But it's fine because taskq_create will reset
* tq_nspawn later.
*/
if (tq->tq_flags & TASKQ_DYNAMIC)
tq->tq_nspawn--;
/* Immediately exit if more threads than allowed were created. */
if (tq->tq_nthreads >= tq->tq_maxthreads)
goto error;
tq->tq_nthreads++;
list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
wake_up(&tq->tq_wait_waitq);
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
if (list_empty(&tq->tq_pend_list) &&
list_empty(&tq->tq_prio_list)) {
if (taskq_thread_should_stop(tq, tqt)) {
wake_up_all(&tq->tq_wait_waitq);
break;
}
add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
spin_unlock_irqrestore(&tq->tq_lock, flags);
schedule();
seq_tasks = 0;
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
remove_wait_queue(&tq->tq_work_waitq, &wait);
} else {
__set_current_state(TASK_RUNNING);
}
if ((t = taskq_next_ent(tq)) != NULL) {
list_del_init(&t->tqent_list);
/*
* A TQENT_FLAG_PREALLOC task may be reused or freed
* during the task function call. Store tqent_id and
* tqent_flags here.
*
* Also use an on stack taskq_ent_t for tqt_task
* assignment in this case; we want to make sure
* to duplicate all fields, so the values are
* correct when it's accessed via DTRACE_PROBE*.
*/
tqt->tqt_id = t->tqent_id;
tqt->tqt_flags = t->tqent_flags;
if (t->tqent_flags & TQENT_FLAG_PREALLOC) {
dup_task = *t;
t = &dup_task;
}
tqt->tqt_task = t;
taskq_insert_in_order(tq, tqt);
tq->tq_nactive++;
spin_unlock_irqrestore(&tq->tq_lock, flags);
DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);
/* Perform the requested task */
t->tqent_func(t->tqent_arg);
DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
tqt->tqt_task = NULL;
/* For prealloc'd tasks, we don't free anything. */
if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
task_done(tq, t);
/*
* When the current lowest outstanding taskqid is
* done, calculate the new lowest outstanding id
*/
if (tq->tq_lowest_id == tqt->tqt_id) {
tq->tq_lowest_id = taskq_lowest_id(tq);
ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
}
/* Spawn additional taskq threads if required. */
if ((++seq_tasks) > spl_taskq_thread_sequential &&
taskq_thread_spawn(tq))
seq_tasks = 0;
tqt->tqt_id = TASKQID_INVALID;
tqt->tqt_flags = 0;
wake_up_all(&tq->tq_wait_waitq);
} else {
if (taskq_thread_should_stop(tq, tqt))
break;
}
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
tq->tq_nthreads--;
list_del_init(&tqt->tqt_thread_list);
error:
kmem_free(tqt, sizeof (taskq_thread_t));
spin_unlock_irqrestore(&tq->tq_lock, flags);
tsd_set(taskq_tsd, NULL);
thread_exit();
return (0);
}
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
{
static int last_used_cpu = 0;
taskq_thread_t *tqt;
tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
INIT_LIST_HEAD(&tqt->tqt_thread_list);
INIT_LIST_HEAD(&tqt->tqt_active_list);
tqt->tqt_tq = tq;
tqt->tqt_id = TASKQID_INVALID;
tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
"%s", tq->tq_name);
if (tqt->tqt_thread == NULL) {
kmem_free(tqt, sizeof (taskq_thread_t));
return (NULL);
}
if (spl_taskq_thread_bind) {
last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
kthread_bind(tqt->tqt_thread, last_used_cpu);
}
if (spl_taskq_thread_priority)
set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
wake_up_process(tqt->tqt_thread);
return (tqt);
}
taskq_t *
taskq_create(const char *name, int threads_arg, pri_t pri,
int minalloc, int maxalloc, uint_t flags)
{
taskq_t *tq;
taskq_thread_t *tqt;
int count = 0, rc = 0, i;
unsigned long irqflags;
int nthreads = threads_arg;
ASSERT(name != NULL);
ASSERT(minalloc >= 0);
ASSERT(maxalloc <= INT_MAX);
ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
/* Scale the number of threads using nthreads as a percentage */
if (flags & TASKQ_THREADS_CPU_PCT) {
ASSERT(nthreads <= 100);
ASSERT(nthreads >= 0);
nthreads = MIN(threads_arg, 100);
nthreads = MAX(nthreads, 0);
nthreads = MAX((num_online_cpus() * nthreads) /100, 1);
}
tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
if (tq == NULL)
return (NULL);
tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
if (flags & TASKQ_THREADS_CPU_PCT) {
tq->tq_hp_support = B_TRUE;
if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
&tq->tq_hp_cb_node) != 0) {
kmem_free(tq, sizeof (*tq));
return (NULL);
}
}
#endif
spin_lock_init(&tq->tq_lock);
INIT_LIST_HEAD(&tq->tq_thread_list);
INIT_LIST_HEAD(&tq->tq_active_list);
tq->tq_name = kmem_strdup(name);
tq->tq_nactive = 0;
tq->tq_nthreads = 0;
tq->tq_nspawn = 0;
tq->tq_maxthreads = nthreads;
tq->tq_cpu_pct = threads_arg;
tq->tq_pri = pri;
tq->tq_minalloc = minalloc;
tq->tq_maxalloc = maxalloc;
tq->tq_nalloc = 0;
tq->tq_flags = (flags | TASKQ_ACTIVE);
tq->tq_next_id = TASKQID_INITIAL;
tq->tq_lowest_id = TASKQID_INITIAL;
INIT_LIST_HEAD(&tq->tq_free_list);
INIT_LIST_HEAD(&tq->tq_pend_list);
INIT_LIST_HEAD(&tq->tq_prio_list);
INIT_LIST_HEAD(&tq->tq_delay_list);
init_waitqueue_head(&tq->tq_work_waitq);
init_waitqueue_head(&tq->tq_wait_waitq);
tq->tq_lock_class = TQ_LOCK_GENERAL;
INIT_LIST_HEAD(&tq->tq_taskqs);
if (flags & TASKQ_PREPOPULATE) {
spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
tq->tq_lock_class);
for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
&irqflags));
spin_unlock_irqrestore(&tq->tq_lock, irqflags);
}
if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
nthreads = 1;
for (i = 0; i < nthreads; i++) {
tqt = taskq_thread_create(tq);
if (tqt == NULL)
rc = 1;
else
count++;
}
/* Wait for all threads to be started before potential destroy */
wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
/*
* The taskq_thread()s started above may have decremented tq_nspawn even
* though they were not dynamically spawned, so reset it to 0.
*/
tq->tq_nspawn = 0;
if (rc) {
taskq_destroy(tq);
tq = NULL;
} else {
down_write(&tq_list_sem);
tq->tq_instance = taskq_find_by_name(name) + 1;
list_add_tail(&tq->tq_taskqs, &tq_list);
up_write(&tq_list_sem);
}
return (tq);
}
EXPORT_SYMBOL(taskq_create);
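An illustrative sketch, not part of this diff, of the TASKQ_THREADS_CPU_PCT scaling performed in taskq_create() above; the taskq name and parameters are invented. A taskq created this way is torn down with taskq_destroy() when no longer needed.

/* Illustrative sketch -- not part of this change. */
static taskq_t *
example_create_pct_taskq(void)
{
	/*
	 * With TASKQ_THREADS_CPU_PCT the second argument is a percentage
	 * of the online CPUs: on an 8-CPU system, 75 becomes
	 * MAX((8 * 75) / 100, 1) == 6 worker threads, and the count is
	 * recalculated on CPU hotplug events (see spl_taskq_expand()).
	 */
	return (taskq_create("example_taskq", 75, maxclsyspri,
	    50, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT));
}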
void
taskq_destroy(taskq_t *tq)
{
struct task_struct *thread;
taskq_thread_t *tqt;
taskq_ent_t *t;
unsigned long flags;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, flags);
#ifdef HAVE_CPU_HOTPLUG
if (tq->tq_hp_support) {
VERIFY0(cpuhp_state_remove_instance_nocalls(
spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
}
#endif
/*
* When TASKQ_ACTIVE is clear new tasks may not be added, nor may new
* worker threads be spawned for a dynamic taskq.
*/
if (dynamic_taskq != NULL)
taskq_wait_outstanding(dynamic_taskq, 0);
taskq_wait(tq);
/* remove taskq from global list used by the kstats */
down_write(&tq_list_sem);
list_del(&tq->tq_taskqs);
up_write(&tq_list_sem);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
/* wait for spawning threads to insert themselves to the list */
while (tq->tq_nspawn) {
spin_unlock_irqrestore(&tq->tq_lock, flags);
schedule_timeout_interruptible(1);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
}
/*
* Signal each thread to exit and block until it does. Each thread
* is responsible for removing itself from the list and freeing its
* taskq_thread_t. This allows for idle threads to opt to remove
* themselves from the taskq. They can be recreated as needed.
*/
while (!list_empty(&tq->tq_thread_list)) {
tqt = list_entry(tq->tq_thread_list.next,
taskq_thread_t, tqt_thread_list);
thread = tqt->tqt_thread;
spin_unlock_irqrestore(&tq->tq_lock, flags);
kthread_stop(thread);
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
}
while (!list_empty(&tq->tq_free_list)) {
t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
list_del_init(&t->tqent_list);
task_free(tq, t);
}
ASSERT0(tq->tq_nthreads);
ASSERT0(tq->tq_nalloc);
ASSERT0(tq->tq_nspawn);
ASSERT(list_empty(&tq->tq_thread_list));
ASSERT(list_empty(&tq->tq_active_list));
ASSERT(list_empty(&tq->tq_free_list));
ASSERT(list_empty(&tq->tq_pend_list));
ASSERT(list_empty(&tq->tq_prio_list));
ASSERT(list_empty(&tq->tq_delay_list));
spin_unlock_irqrestore(&tq->tq_lock, flags);
kmem_strfree(tq->tq_name);
kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
static unsigned int spl_taskq_kick = 0;
/*
* 2.6.36 API Change
* module_param_cb is introduced to take kernel_param_ops and
* module_param_call is marked as obsolete. Also set and get operations
* were changed to take a 'const struct kernel_param *'.
*/
static int
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
{
int ret;
taskq_t *tq = NULL;
taskq_ent_t *t;
unsigned long flags;
ret = param_set_uint(val, kp);
if (ret < 0 || !spl_taskq_kick)
return (ret);
/* reset value */
spl_taskq_kick = 0;
down_read(&tq_list_sem);
list_for_each_entry(tq, &tq_list, tq_taskqs) {
spin_lock_irqsave_nested(&tq->tq_lock, flags,
tq->tq_lock_class);
/* Check if the first pending is older than 5 seconds */
t = taskq_next_ent(tq);
if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
(void) taskq_thread_spawn(tq);
printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
tq->tq_name, tq->tq_instance);
}
spin_unlock_irqrestore(&tq->tq_lock, flags);
}
up_read(&tq_list_sem);
return (ret);
}
#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
.set = param_set_taskq_kick,
.get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
&spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
"Write nonzero to kick stuck taskqs to spawn more threads");
#ifdef HAVE_CPU_HOTPLUG
/*
* This callback will be called exactly once for each core that comes online,
* for each dynamic taskq. We attempt to expand taskqs that have
* TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
* time, to correctly determine whether or not to add a thread.
*/
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
unsigned long flags;
int err = 0;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
- if (!(tq->tq_flags & TASKQ_ACTIVE))
- goto out;
+ if (!(tq->tq_flags & TASKQ_ACTIVE)) {
+ spin_unlock_irqrestore(&tq->tq_lock, flags);
+ return (err);
+ }
ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
int nthreads = MIN(tq->tq_cpu_pct, 100);
nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
tq->tq_maxthreads = nthreads;
if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
tq->tq_maxthreads > tq->tq_nthreads) {
- ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads + 1);
+ spin_unlock_irqrestore(&tq->tq_lock, flags);
taskq_thread_t *tqt = taskq_thread_create(tq);
if (tqt == NULL)
err = -1;
+ return (err);
}
-
-out:
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (err);
}
/*
* While we don't support offlining CPUs, it is possible that CPUs will fail
* to online successfully. We do need to be able to handle this case
* gracefully.
*/
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
unsigned long flags;
ASSERT(tq);
spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
if (!(tq->tq_flags & TASKQ_ACTIVE))
goto out;
ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
int nthreads = MIN(tq->tq_cpu_pct, 100);
nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
tq->tq_maxthreads = nthreads;
if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
tq->tq_maxthreads < tq->tq_nthreads) {
ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
taskq_thread_t, tqt_thread_list);
struct task_struct *thread = tqt->tqt_thread;
spin_unlock_irqrestore(&tq->tq_lock, flags);
kthread_stop(thread);
return (0);
}
out:
spin_unlock_irqrestore(&tq->tq_lock, flags);
return (0);
}
#endif
int
spl_taskq_init(void)
{
init_rwsem(&tq_list_sem);
tsd_create(&taskq_tsd, NULL);
#ifdef HAVE_CPU_HOTPLUG
spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
"fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif
system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_taskq == NULL)
return (1);
system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
taskq_destroy(system_taskq);
return (1);
}
dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
taskq_destroy(system_taskq);
taskq_destroy(system_delay_taskq);
return (1);
}
/*
* This is used to annotate tq_lock, so
* taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
* does not trigger a lockdep warning re: possible recursive locking
*/
dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
return (0);
}
void
spl_taskq_fini(void)
{
taskq_destroy(dynamic_taskq);
dynamic_taskq = NULL;
taskq_destroy(system_delay_taskq);
system_delay_taskq = NULL;
taskq_destroy(system_taskq);
system_taskq = NULL;
tsd_destroy(&taskq_tsd);
#ifdef HAVE_CPU_HOTPLUG
cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
spl_taskq_cpuhp_state = 0;
#endif
}
diff --git a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
index 834c527117a3..16d2ca1b133b 100644
--- a/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
+++ b/sys/contrib/openzfs/module/os/linux/spl/spl-thread.c
@@ -1,211 +1,216 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*
* Solaris Porting Layer (SPL) Thread Implementation.
*/
#include <sys/thread.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
/*
* Thread interfaces
*/
typedef struct thread_priv_s {
unsigned long tp_magic; /* Magic */
int tp_name_size; /* Name size */
char *tp_name; /* Name (without _thread suffix) */
void (*tp_func)(void *); /* Registered function */
void *tp_args; /* Args to be passed to function */
size_t tp_len; /* Len to be passed to function */
int tp_state; /* State to start thread at */
pri_t tp_pri; /* Priority to start thread at */
} thread_priv_t;
static int
thread_generic_wrapper(void *arg)
{
thread_priv_t *tp = (thread_priv_t *)arg;
void (*func)(void *);
void *args;
ASSERT(tp->tp_magic == TP_MAGIC);
func = tp->tp_func;
args = tp->tp_args;
set_current_state(tp->tp_state);
set_user_nice((kthread_t *)current, PRIO_TO_NICE(tp->tp_pri));
kmem_free(tp->tp_name, tp->tp_name_size);
kmem_free(tp, sizeof (thread_priv_t));
if (func)
func(args);
return (0);
}
void
__thread_exit(void)
{
tsd_exit();
- complete_and_exit(NULL, 0);
+ SPL_KTHREAD_COMPLETE_AND_EXIT(NULL, 0);
/* Unreachable */
}
EXPORT_SYMBOL(__thread_exit);
/*
* thread_create() may block forever if it cannot create a thread or
* allocate memory. This is preferable to returning a NULL, which
* Solaris-style callers likely never check for... since it can't fail.
*/
kthread_t *
__thread_create(caddr_t stk, size_t stksize, thread_func_t func,
const char *name, void *args, size_t len, proc_t *pp, int state, pri_t pri)
{
thread_priv_t *tp;
struct task_struct *tsk;
char *p;
/* Option pp is simply ignored */
/* Variable stack size unsupported */
ASSERT(stk == NULL);
tp = kmem_alloc(sizeof (thread_priv_t), KM_PUSHPAGE);
if (tp == NULL)
return (NULL);
tp->tp_magic = TP_MAGIC;
tp->tp_name_size = strlen(name) + 1;
tp->tp_name = kmem_alloc(tp->tp_name_size, KM_PUSHPAGE);
if (tp->tp_name == NULL) {
kmem_free(tp, sizeof (thread_priv_t));
return (NULL);
}
strncpy(tp->tp_name, name, tp->tp_name_size);
/*
* Strip trailing "_thread" from the passed name, which will be the func
* name, since the exposed API has no parameter for passing a name.
*/
p = strstr(tp->tp_name, "_thread");
if (p)
p[0] = '\0';
tp->tp_func = func;
tp->tp_args = args;
tp->tp_len = len;
tp->tp_state = state;
tp->tp_pri = pri;
tsk = spl_kthread_create(thread_generic_wrapper, (void *)tp,
"%s", tp->tp_name);
if (IS_ERR(tsk))
return (NULL);
wake_up_process(tsk);
return ((kthread_t *)tsk);
}
EXPORT_SYMBOL(__thread_create);
/*
* spl_kthread_create - Wrapper providing pre-3.13 semantics for
* kthread_create() in which it is not killable and less likely
* to return -ENOMEM.
*/
struct task_struct *
spl_kthread_create(int (*func)(void *), void *data, const char namefmt[], ...)
{
struct task_struct *tsk;
va_list args;
char name[TASK_COMM_LEN];
va_start(args, namefmt);
vsnprintf(name, sizeof (name), namefmt, args);
va_end(args);
do {
tsk = kthread_create(func, data, "%s", name);
if (IS_ERR(tsk)) {
if (signal_pending(current)) {
clear_thread_flag(TIF_SIGPENDING);
continue;
}
if (PTR_ERR(tsk) == -ENOMEM)
continue;
return (NULL);
} else {
return (tsk);
}
} while (1);
}
EXPORT_SYMBOL(spl_kthread_create);
/*
* The "why" argument indicates the allowable side-effects of the call:
*
* FORREAL: Extract the next pending signal from p_sig into p_cursig;
* stop the process if a stop has been requested or if a traced signal
* is pending.
*
* JUSTLOOKING: Don't stop the process, just indicate whether or not
* a signal might be pending (FORREAL is needed to tell for sure).
*/
int
issig(int why)
{
ASSERT(why == FORREAL || why == JUSTLOOKING);
if (!signal_pending(current))
return (0);
if (why != FORREAL)
return (1);
struct task_struct *task = current;
spl_kernel_siginfo_t __info;
sigset_t set;
siginitsetinv(&set, 1ULL << (SIGSTOP - 1) | 1ULL << (SIGTSTP - 1));
sigorsets(&set, &task->blocked, &set);
spin_lock_irq(&task->sighand->siglock);
int ret;
+#ifdef HAVE_DEQUEUE_SIGNAL_4ARG
+ enum pid_type __type;
+ if ((ret = dequeue_signal(task, &set, &__info, &__type)) != 0) {
+#else
if ((ret = dequeue_signal(task, &set, &__info)) != 0) {
+#endif
#ifdef HAVE_SIGNAL_STOP
spin_unlock_irq(&task->sighand->siglock);
kernel_signal_stop();
#else
if (current->jobctl & JOBCTL_STOP_DEQUEUED)
spl_set_special_state(TASK_STOPPED);
spin_unlock_irq(&current->sighand->siglock);
schedule();
#endif
return (0);
}
spin_unlock_irq(&task->sighand->siglock);
return (1);
}
EXPORT_SYMBOL(issig);
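/*
 * Usage sketch (illustrative only): callers typically probe with JUSTLOOKING
 * before paying for the FORREAL dequeue, e.g.
 *
 *	if (issig(JUSTLOOKING) && issig(FORREAL))
 *		return (SET_ERROR(EINTR));
 *
 * Actual call sites and error handling vary; this only shows the two-step
 * pattern described in the comment above.
 */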
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
index a432a736453c..581a790865b7 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_disk.c
@@ -1,963 +1,963 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
#ifdef HAVE_LINUX_BLK_CGROUP_HEADER
#include <linux/blk-cgroup.h>
#endif
typedef struct vdev_disk {
struct block_device *vd_bdev;
krwlock_t vd_lock;
} vdev_disk_t;
/*
* Unique identifier for the exclusive vdev holder.
*/
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
* Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
* device is missing. The missing path may be transient since the links
* can be briefly removed and recreated in response to udev events.
*/
static unsigned zfs_vdev_open_timeout_ms = 1000;
/*
* Size of the "reserved" partition, in blocks.
*/
#define EFI_MIN_RESV_SIZE (16 * 1024)
/*
* Virtual device vector for disks.
*/
typedef struct dio_request {
zio_t *dr_zio; /* Parent ZIO */
atomic_t dr_ref; /* References */
int dr_error; /* Bio error */
int dr_bio_count; /* Count of bio's */
struct bio *dr_bio[0]; /* Attached bio's */
} dio_request_t;
static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
fmode_t mode = 0;
if (spa_mode & SPA_MODE_READ)
mode |= FMODE_READ;
if (spa_mode & SPA_MODE_WRITE)
mode |= FMODE_WRITE;
return (mode);
}
/*
* Returns the usable capacity (in bytes) for the partition or disk.
*/
static uint64_t
bdev_capacity(struct block_device *bdev)
{
return (i_size_read(bdev->bd_inode));
}
#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
return (bdev->bd_contains);
}
#endif
/*
* Returns the maximum expansion capacity of the block device (in bytes).
*
* It is possible to expand a vdev when it has been created as a wholedisk
* and the containing block device has increased in capacity. Or when the
* partition containing the pool has been manually increased in size.
*
* This function is only responsible for calculating the potential expansion
* size so it can be reported by 'zpool list'. efi_use_whole_disk() is
* responsible for verifying the expected partition layout in the wholedisk
* case, and updating the partition table if appropriate. Once the partition
* size has been increased the additional capacity will be visible using
* bdev_capacity().
*
* The returned maximum expansion capacity is always expected to be larger, or
* at the very least equal, to its usable capacity to prevent overestimating
* the pool expandsize.
*/
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
uint64_t psize;
int64_t available;
if (wholedisk && bdev != bdev_whole(bdev)) {
/*
* When reporting maximum expansion capacity for a wholedisk,
* deduct any capacity which is expected to be lost due to
* alignment restrictions. Over-reporting this value isn't
* harmful and would only result in slightly less capacity
* than expected post-expansion.
* The estimated available space may be slightly smaller than
* bdev_capacity() for devices where the number of sectors is
* not a multiple of the alignment size and the partition layout
* is keeping less than PARTITION_END_ALIGNMENT bytes after the
* "reserved" EFI partition: in such cases return the device
* usable capacity.
*/
available = i_size_read(bdev_whole(bdev)->bd_inode) -
((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
PARTITION_END_ALIGNMENT) << SECTOR_BITS);
psize = MAX(available, bdev_capacity(bdev));
} else {
psize = bdev_capacity(bdev);
}
return (psize);
}
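/*
 * Worked example (values partly assumed): with EFI_MIN_RESV_SIZE = 16384
 * blocks as defined above, and assuming the usual NEW_START_BLOCK = 256 and
 * PARTITION_END_ALIGNMENT = 2048, the deduction in bdev_max_capacity() is
 * (16384 + 256 + 2048) << 9 = 9,568,256 bytes, i.e. roughly 9 MiB taken off
 * the whole-disk size before comparing against bdev_capacity().
 */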
static void
vdev_disk_error(zio_t *zio)
{
/*
* This function can be called in interrupt context, for instance while
* handling IRQs coming from a misbehaving disk device; use printk()
* which is safe from any context.
*/
printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
"offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
zio->io_vd->vdev_path, zio->io_error, zio->io_type,
(u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
zio->io_flags);
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
struct block_device *bdev;
fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
vdev_disk_t *vd;
/* Must have a pathname and it must be absolute. */
if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
vdev_dbgmsg(v, "invalid vdev_path");
return (SET_ERROR(EINVAL));
}
/*
* Reopen the device if it is currently open. When expanding a
* partition, force re-scanning the partition table if userland
* did not take care of this already. We need to do this while closed
* in order to get an accurate updated block device size. Then,
* since udev may need to recreate the device links, increase the
* open retry timeout before reporting the device as unavailable.
*/
vd = v->vdev_tsd;
if (vd) {
char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
boolean_t reread_part = B_FALSE;
rw_enter(&vd->vd_lock, RW_WRITER);
bdev = vd->vd_bdev;
vd->vd_bdev = NULL;
if (bdev) {
if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
bdevname(bdev_whole(bdev), disk_name + 5);
/*
* If userland has BLKPG_RESIZE_PARTITION,
* then it should have updated the partition
* table already. We can detect this by
* comparing our current physical size
* with that of the device. If they are
* the same, then we must not have
* BLKPG_RESIZE_PARTITION or it failed to
* update the partition table online. We
* fallback to rescanning the partition
* table from the kernel below. However,
* if the capacity already reflects the
* updated partition, then we skip
* rescanning the partition table here.
*/
if (v->vdev_psize == bdev_capacity(bdev))
reread_part = B_TRUE;
}
blkdev_put(bdev, mode | FMODE_EXCL);
}
if (reread_part) {
bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
zfs_vdev_holder);
if (!IS_ERR(bdev)) {
int error = vdev_bdev_reread_part(bdev);
blkdev_put(bdev, mode | FMODE_EXCL);
if (error == 0) {
timeout = MSEC2NSEC(
zfs_vdev_open_timeout_ms * 2);
}
}
}
} else {
vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
rw_enter(&vd->vd_lock, RW_WRITER);
}
/*
* Devices are always opened by the path provided at configuration
* time. This means that if the provided path is a udev by-id path
* then drives may be re-cabled without an issue. If the provided
* path is a udev by-path path, then the physical location information
* will be preserved. This can be critical for more complicated
* configurations where drives are located in specific physical
* locations to maximize the system's tolerance to component failure.
*
* Alternatively, you can provide your own udev rule to flexibly map
* the drives as you see fit. It is not advised that you use the
* /dev/[hd]d devices which may be reordered due to probing order.
* Devices in the wrong locations will be detected by the higher
* level vdev validation.
*
* The specified paths may be briefly removed and recreated in
* response to udev events. This should be exceptionally unlikely
* because the zpool command makes every effort to verify these paths
* have already settled prior to reaching this point. Therefore,
* an ENOENT failure at this point is highly likely to be transient
* and it is reasonable to sleep and retry before giving up. In
* practice delays have been observed to be on the order of 100ms.
*
* When ERESTARTSYS is returned it indicates the block device is
* a zvol which could not be opened due to the deadlock detection
* logic in zvol_open(). Extend the timeout and retry the open;
* subsequent attempts are expected to eventually succeed.
*/
hrtime_t start = gethrtime();
bdev = ERR_PTR(-ENXIO);
while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
zfs_vdev_holder);
if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
schedule_timeout(MSEC_TO_TICK(10));
} else if (unlikely(PTR_ERR(bdev) == -ERESTARTSYS)) {
timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms * 10);
continue;
} else if (IS_ERR(bdev)) {
break;
}
}
if (IS_ERR(bdev)) {
int error = -PTR_ERR(bdev);
vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
(u_longlong_t)(gethrtime() - start),
(u_longlong_t)timeout);
vd->vd_bdev = NULL;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
return (SET_ERROR(error));
} else {
vd->vd_bdev = bdev;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
}
struct request_queue *q = bdev_get_queue(vd->vd_bdev);
/* Determine the physical block size */
int physical_block_size = bdev_physical_block_size(vd->vd_bdev);
/* Determine the logical block size */
int logical_block_size = bdev_logical_block_size(vd->vd_bdev);
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
v->vdev_nowritecache = B_FALSE;
/* Set when device reports it supports TRIM. */
v->vdev_has_trim = !!blk_queue_discard(q);
/* Set when device reports it supports secure TRIM. */
v->vdev_has_securetrim = !!blk_queue_discard_secure(q);
/* Inform the ZIO pipeline that we are non-rotational */
v->vdev_nonrot = blk_queue_nonrot(q);
/* Physical volume size in bytes for the partition */
*psize = bdev_capacity(vd->vd_bdev);
/* Physical volume size in bytes including possible expansion space */
*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);
/* Based on the minimum sector size set the block size */
*physical_ashift = highbit64(MAX(physical_block_size,
SPA_MINBLOCKSIZE)) - 1;
*logical_ashift = highbit64(MAX(logical_block_size,
SPA_MINBLOCKSIZE)) - 1;
return (0);
}
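/*
 * Example (illustrative only): a drive reporting a 4096-byte physical and
 * 512-byte logical block size yields
 * highbit64(MAX(4096, SPA_MINBLOCKSIZE)) - 1 = 12 and
 * highbit64(MAX(512, SPA_MINBLOCKSIZE)) - 1 = 9, so the vdev is opened with
 * physical_ashift = 12 and logical_ashift = 9.
 */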
static void
vdev_disk_close(vdev_t *v)
{
vdev_disk_t *vd = v->vdev_tsd;
if (v->vdev_reopening || vd == NULL)
return;
if (vd->vd_bdev != NULL) {
blkdev_put(vd->vd_bdev,
vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
}
rw_destroy(&vd->vd_lock);
kmem_free(vd, sizeof (vdev_disk_t));
v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
sizeof (struct bio *) * bio_count, KM_SLEEP);
atomic_set(&dr->dr_ref, 0);
dr->dr_bio_count = bio_count;
dr->dr_error = 0;
for (int i = 0; i < dr->dr_bio_count; i++)
dr->dr_bio[i] = NULL;
return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
int i;
for (i = 0; i < dr->dr_bio_count; i++)
if (dr->dr_bio[i])
bio_put(dr->dr_bio[i]);
kmem_free(dr, sizeof (dio_request_t) +
sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
int rc = atomic_dec_return(&dr->dr_ref);
/*
* Free the dio_request when the last reference is dropped and
* ensure zio_interpret is called only once with the correct zio
*/
if (rc == 0) {
zio_t *zio = dr->dr_zio;
int error = dr->dr_error;
vdev_disk_dio_free(dr);
if (zio) {
zio->io_error = error;
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_delay_interrupt(zio);
}
}
return (rc);
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
dio_request_t *dr = bio->bi_private;
int rc;
if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
dr->dr_error = BIO_END_IO_ERROR(bio);
#else
if (error)
dr->dr_error = -(error);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
dr->dr_error = EIO;
#endif
}
/* Drop reference acquired by __vdev_disk_physio */
rc = vdev_disk_dio_put(dr);
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
(void) submit_bio(bio);
#else
- (void) submit_bio(0, bio);
+ (void) submit_bio(bio_data_dir(bio), bio);
#endif
}
/*
* preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
* replace it with preempt_schedule under the following condition:
*/
#if defined(CONFIG_ARM64) && \
defined(CONFIG_PREEMPTION) && \
defined(CONFIG_BLK_CGROUP)
#define preempt_schedule_notrace(x) preempt_schedule(x)
#endif
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
* The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
* blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
* As a side effect the function was converted to GPL-only. Define our
* own version when needed which uses rcu_read_lock_sched().
*/
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
struct percpu_ref *ref = &blkg->refcnt;
unsigned long __percpu *count;
bool rc;
rcu_read_lock_sched();
if (__ref_is_percpu(ref, &count)) {
this_cpu_inc(*count);
rc = true;
} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
rc = atomic_long_inc_not_zero(&ref->data->count);
#else
rc = atomic_long_inc_not_zero(&ref->count);
#endif
}
rcu_read_unlock_sched();
return (rc);
}
#elif defined(HAVE_BLKG_TRYGET)
#define vdev_blkg_tryget(bg) blkg_tryget(bg)
#endif
#ifdef HAVE_BIO_SET_DEV_MACRO
/*
* The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
* GPL-only bio_associate_blkg() symbol thus inadvertently converting
* the entire macro. Provide a minimal version which always assigns the
* request queue's root_blkg to the bio.
*/
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
ASSERT3P(q, !=, NULL);
ASSERT3P(bio->bi_blkg, ==, NULL);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
}
#define bio_associate_blkg vdev_bio_associate_blkg
#else
static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
bio_clear_flag(bio, BIO_REMAPPED);
if (bio->bi_bdev != bdev)
bio_clear_flag(bio, BIO_THROTTLED);
bio->bi_bdev = bdev;
ASSERT3P(q, !=, NULL);
ASSERT3P(bio->bi_blkg, ==, NULL);
if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
bio->bi_blkg = q->root_blkg;
}
#define bio_set_dev vdev_bio_set_dev
#endif
#endif
#else
/*
* Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
*/
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
static inline void
vdev_submit_bio(struct bio *bio)
{
struct bio_list *bio_list = current->bio_list;
current->bio_list = NULL;
vdev_submit_bio_impl(bio);
current->bio_list = bio_list;
}
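/*
 * Descriptive note (assumed rationale): temporarily clearing
 * current->bio_list opts this submission out of the block layer's per-task
 * bio queuing, so the bio is dispatched immediately instead of being
 * deferred to the caller's make_request loop when I/O is issued from within
 * an existing bio submission context.
 */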
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
size_t io_size, uint64_t io_offset, int rw, int flags)
{
dio_request_t *dr;
uint64_t abd_offset;
uint64_t bio_offset;
int bio_size;
int bio_count = 16;
int error = 0;
struct blk_plug plug;
/*
* Accessing outside the block device is never allowed.
*/
if (io_offset + io_size > bdev->bd_inode->i_size) {
vdev_dbgmsg(zio->io_vd,
"Illegal access %llu size %llu, device size %llu",
io_offset, io_size, i_size_read(bdev->bd_inode));
return (SET_ERROR(EIO));
}
retry:
dr = vdev_disk_dio_alloc(bio_count);
if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
bio_set_flags_failfast(bdev, &flags);
dr->dr_zio = zio;
/*
* Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which
* is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
* can cover at least 128KB and at most 1MB. When the required number
* of iovec's exceeds this, we are forced to break the IO in multiple
* bio's and wait for them all to complete. This is likely if the
* recordsize property is increased beyond 1MB. The default
* bio_count=16 should typically accommodate the maximum-size zio of
* 16MB.
*/
abd_offset = 0;
bio_offset = io_offset;
bio_size = io_size;
for (int i = 0; i <= dr->dr_bio_count; i++) {
/* Finished constructing bio's for given buffer */
if (bio_size <= 0)
break;
/*
* If additional bio's are required, we have to retry, but
* this should be rare - see the comment above.
*/
if (dr->dr_bio_count == i) {
vdev_disk_dio_free(dr);
bio_count *= 2;
goto retry;
}
/* bio_alloc() with __GFP_WAIT never returns NULL */
#ifdef HAVE_BIO_MAX_SEGS
dr->dr_bio[i] = bio_alloc(GFP_NOIO, bio_max_segs(
abd_nr_pages_off(zio->io_abd, bio_size, abd_offset)));
#else
dr->dr_bio[i] = bio_alloc(GFP_NOIO,
MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
BIO_MAX_PAGES));
#endif
if (unlikely(dr->dr_bio[i] == NULL)) {
vdev_disk_dio_free(dr);
return (SET_ERROR(ENOMEM));
}
/* Matching put called by vdev_disk_physio_completion */
vdev_disk_dio_get(dr);
bio_set_dev(dr->dr_bio[i], bdev);
BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
dr->dr_bio[i]->bi_private = dr;
bio_set_op_attrs(dr->dr_bio[i], rw, flags);
/* Remaining size is returned to become the new size */
bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
}
/* Extra reference to protect dio_request during vdev_submit_bio */
vdev_disk_dio_get(dr);
if (dr->dr_bio_count > 1)
blk_start_plug(&plug);
/* Submit all bio's associated with this dio */
for (int i = 0; i < dr->dr_bio_count; i++) {
if (dr->dr_bio[i])
vdev_submit_bio(dr->dr_bio[i]);
}
if (dr->dr_bio_count > 1)
blk_finish_plug(&plug);
(void) vdev_disk_dio_put(dr);
return (error);
}
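/*
 * Sizing sketch (illustrative only): with BIO_MAX_PAGES = 256 and 4K pages,
 * a single bio maps at most 256 * 4096 = 1 MiB, so the initial bio_count = 16
 * covers a 16 MiB zio; anything larger, or a heavily fragmented ABD, falls
 * back to the doubling retry loop above.
 */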
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
zio->io_error = BIO_END_IO_ERROR(bio);
#else
zio->io_error = -error;
#endif
if (zio->io_error && (zio->io_error == EOPNOTSUPP))
zio->io_vd->vdev_nowritecache = B_TRUE;
bio_put(bio);
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
struct request_queue *q;
struct bio *bio;
q = bdev_get_queue(bdev);
if (!q)
return (SET_ERROR(ENXIO));
bio = bio_alloc(GFP_NOIO, 0);
/* bio_alloc() with __GFP_WAIT never returns NULL */
if (unlikely(bio == NULL))
return (SET_ERROR(ENOMEM));
bio->bi_end_io = vdev_disk_io_flush_completion;
bio->bi_private = zio;
bio_set_dev(bio, bdev);
bio_set_flush(bio);
vdev_submit_bio(bio);
invalidate_bdev(bdev);
return (0);
}
static void
vdev_disk_io_start(zio_t *zio)
{
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
unsigned long trim_flags = 0;
int rw, error;
/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
* Nothing to be done here but return failure.
*/
if (vd == NULL) {
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
rw_enter(&vd->vd_lock, RW_READER);
/*
* If the vdev is closed, it's likely due to a failed reopen and is
* in the UNAVAIL state. Nothing to be done here but return failure.
*/
if (vd->vd_bdev == NULL) {
rw_exit(&vd->vd_lock);
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
if (!vdev_readable(v)) {
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush)
break;
if (v->vdev_nowritecache) {
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
error = vdev_disk_io_flush(vd->vd_bdev, zio);
if (error == 0) {
rw_exit(&vd->vd_lock);
return;
}
zio->io_error = error;
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
rw_exit(&vd->vd_lock);
zio_execute(zio);
return;
case ZIO_TYPE_WRITE:
rw = WRITE;
break;
case ZIO_TYPE_READ:
rw = READ;
break;
case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
if (zio->io_trim_flags & ZIO_TRIM_SECURE)
trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
trim_flags);
rw_exit(&vd->vd_lock);
zio_interrupt(zio);
return;
default:
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENOTSUP);
zio_interrupt(zio);
return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
error = __vdev_disk_physio(vd->vd_bdev, zio,
zio->io_size, zio->io_offset, rw, 0);
rw_exit(&vd->vd_lock);
if (error) {
zio->io_error = error;
zio_interrupt(zio);
return;
}
}
static void
vdev_disk_io_done(zio_t *zio)
{
/*
* If the device returned EIO, we revalidate the media. If it is
* determined the media has changed this triggers the asynchronous
* removal of the device from the configuration.
*/
if (zio->io_error == EIO) {
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
if (zfs_check_media_change(vd->vd_bdev)) {
invalidate_bdev(vd->vd_bdev);
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
}
}
}
static void
vdev_disk_hold(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* We must have a pathname, and it must be absolute. */
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
return;
/*
* Only prefetch path and devid info if the device has
* never been opened.
*/
if (vd->vdev_tsd != NULL)
return;
}
static void
vdev_disk_rele(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_disk_open,
.vdev_op_close = vdev_disk_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_disk_io_start,
.vdev_op_io_done = vdev_disk_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_disk_hold,
.vdev_op_rele = vdev_disk_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
/*
* The zfs_vdev_scheduler module option has been deprecated. Setting this
* value no longer has any effect. It has not yet been entirely removed
* to allow the module to be loaded if this option is specified in the
* /etc/modprobe.d/zfs.conf file. The following warning will be logged.
*/
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
int error = param_set_charp(val, kp);
if (error == 0) {
printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
"is not supported.\n");
}
return (error);
}
char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
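/*
 * Illustrative only: a legacy /etc/modprobe.d/zfs.conf line such as
 *
 *	options zfs zfs_vdev_scheduler=noop
 *
 * still allows the module to load, but now merely triggers the KERN_INFO
 * message above and has no effect on the elevator.
 */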
int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
uint64_t val;
int error;
error = kstrtoull(buf, 0, &val);
if (error < 0)
return (SET_ERROR(error));
if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
return (SET_ERROR(-EINVAL));
error = param_set_ulong(buf, kp);
if (error < 0)
return (SET_ERROR(error));
return (0);
}
int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
uint64_t val;
int error;
error = kstrtoull(buf, 0, &val);
if (error < 0)
return (SET_ERROR(error));
if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
return (SET_ERROR(-EINVAL));
error = param_set_ulong(buf, kp);
if (error < 0)
return (SET_ERROR(error));
return (0);
}
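/*
 * Usage sketch (paths assumed): the ashift bounds these setters validate are
 * typically adjusted at runtime via sysfs, e.g.
 *
 *	echo 12 > /sys/module/zfs/parameters/zfs_vdev_min_auto_ashift
 *
 * and values outside [ASHIFT_MIN, zfs_vdev_max_auto_ashift] (or the mirrored
 * range for the max) are rejected with EINVAL.
 */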
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c b/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c
index bf8a13ae6154..98338e604fa8 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/vdev_file.c
@@ -1,382 +1,382 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/abd.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/zfs_file.h>
#ifdef _KERNEL
#include <linux/falloc.h>
#endif
/*
* Virtual device vector for files.
*/
static taskq_t *vdev_file_taskq;
/*
* By default, the logical/physical ashift for file vdevs is set to
* SPA_MINBLOCKSHIFT (9). This allows all file vdevs to use 512B (1 << 9)
* blocksizes. Users may opt to change one or both of these for testing
* or performance reasons. Care should be taken as these values will
* impact the vdev_ashift setting which can only be set at vdev creation
* time.
*/
unsigned long vdev_file_logical_ashift = SPA_MINBLOCKSHIFT;
unsigned long vdev_file_physical_ashift = SPA_MINBLOCKSHIFT;
static void
vdev_file_hold(vdev_t *vd)
{
ASSERT(vd->vdev_path != NULL);
}
static void
vdev_file_rele(vdev_t *vd)
{
ASSERT(vd->vdev_path != NULL);
}
static mode_t
vdev_file_open_mode(spa_mode_t spa_mode)
{
mode_t mode = 0;
if ((spa_mode & SPA_MODE_READ) && (spa_mode & SPA_MODE_WRITE)) {
mode = O_RDWR;
} else if (spa_mode & SPA_MODE_READ) {
mode = O_RDONLY;
} else if (spa_mode & SPA_MODE_WRITE) {
mode = O_WRONLY;
}
return (mode | O_LARGEFILE);
}
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_file_t *vf;
zfs_file_t *fp;
zfs_file_attr_t zfa;
int error;
/*
* Rotational optimizations only make sense on block devices.
*/
vd->vdev_nonrot = B_TRUE;
/*
* Allow TRIM on file-based vdevs. This may not always be supported,
* since it depends on your kernel version and underlying filesystem
* type, but it is always safe to attempt.
*/
vd->vdev_has_trim = B_TRUE;
/*
* Disable secure TRIM on file based vdevs. There is no way to
* request this behavior from the underlying filesystem.
*/
vd->vdev_has_securetrim = B_FALSE;
/*
* We must have a pathname, and it must be absolute.
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* Reopen the device if it's not currently open. Otherwise,
* just update the physical size of the device.
*/
if (vd->vdev_tsd != NULL) {
ASSERT(vd->vdev_reopening);
vf = vd->vdev_tsd;
goto skip_open;
}
vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);
/*
* We always open the files from the root of the global zone, even if
* we're in a local zone. If the user has gotten to this point, the
* administrator has already decided that the pool should be available
* to local zone users, so the underlying devices should be as well.
*/
ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');
error = zfs_file_open(vd->vdev_path,
vdev_file_open_mode(spa_mode(vd->vdev_spa)), 0, &fp);
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (error);
}
vf->vf_file = fp;
#ifdef _KERNEL
/*
* Make sure it's a regular file.
*/
if (zfs_file_getattr(fp, &zfa)) {
return (SET_ERROR(ENODEV));
}
if (!S_ISREG(zfa.zfa_mode)) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (SET_ERROR(ENODEV));
}
#endif
skip_open:
error = zfs_file_getattr(vf->vf_file, &zfa);
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (error);
}
*max_psize = *psize = zfa.zfa_size;
*logical_ashift = vdev_file_logical_ashift;
*physical_ashift = vdev_file_physical_ashift;
return (0);
}
static void
vdev_file_close(vdev_t *vd)
{
vdev_file_t *vf = vd->vdev_tsd;
if (vd->vdev_reopening || vf == NULL)
return;
if (vf->vf_file != NULL) {
(void) zfs_file_close(vf->vf_file);
}
vd->vdev_delayed_close = B_FALSE;
kmem_free(vf, sizeof (vdev_file_t));
vd->vdev_tsd = NULL;
}
static void
vdev_file_io_strategy(void *arg)
{
zio_t *zio = (zio_t *)arg;
vdev_t *vd = zio->io_vd;
vdev_file_t *vf = vd->vdev_tsd;
ssize_t resid;
void *buf;
loff_t off;
ssize_t size;
int err;
off = zio->io_offset;
size = zio->io_size;
resid = 0;
if (zio->io_type == ZIO_TYPE_READ) {
buf = abd_borrow_buf(zio->io_abd, zio->io_size);
err = zfs_file_pread(vf->vf_file, buf, size, off, &resid);
abd_return_buf_copy(zio->io_abd, buf, size);
} else {
buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);
err = zfs_file_pwrite(vf->vf_file, buf, size, off, &resid);
abd_return_buf(zio->io_abd, buf, size);
}
zio->io_error = err;
if (resid != 0 && zio->io_error == 0)
zio->io_error = SET_ERROR(ENOSPC);
zio_delay_interrupt(zio);
}
static void
vdev_file_io_fsync(void *arg)
{
zio_t *zio = (zio_t *)arg;
vdev_file_t *vf = zio->io_vd->vdev_tsd;
zio->io_error = zfs_file_fsync(vf->vf_file, O_SYNC | O_DSYNC);
zio_interrupt(zio);
}
static void
vdev_file_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_file_t *vf = vd->vdev_tsd;
if (zio->io_type == ZIO_TYPE_IOCTL) {
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush)
break;
/*
* We cannot safely call vfs_fsync() when PF_FSTRANS
* is set in the current context. Filesystems like
* XFS include sanity checks to verify it is not
* already set, see xfs_vm_writepage(). Therefore
* the sync must be dispatched to a different context.
*/
if (__spl_pf_fstrans_check()) {
VERIFY3U(taskq_dispatch(vdev_file_taskq,
vdev_file_io_fsync, zio, TQ_SLEEP), !=,
TASKQID_INVALID);
return;
}
zio->io_error = zfs_file_fsync(vf->vf_file,
O_SYNC | O_DSYNC);
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
zio_execute(zio);
return;
} else if (zio->io_type == ZIO_TYPE_TRIM) {
int mode = 0;
ASSERT3U(zio->io_size, !=, 0);
#ifdef __linux__
mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
#endif
zio->io_error = zfs_file_fallocate(vf->vf_file,
mode, zio->io_offset, zio->io_size);
zio_execute(zio);
return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
TQ_SLEEP), !=, TASKQID_INVALID);
}
-/* ARGSUSED */
static void
vdev_file_io_done(zio_t *zio)
{
+ (void) zio;
}
vdev_ops_t vdev_file_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_file_hold,
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_FILE, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
void
vdev_file_init(void)
{
vdev_file_taskq = taskq_create("z_vdev_file", MAX(boot_ncpus, 16),
minclsyspri, boot_ncpus, INT_MAX, TASKQ_DYNAMIC);
VERIFY(vdev_file_taskq);
}
void
vdev_file_fini(void)
{
taskq_destroy(vdev_file_taskq);
}
/*
* From userland we access disks just like files.
*/
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_file_open,
.vdev_op_close = vdev_file_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_file_io_start,
.vdev_op_io_done = vdev_file_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = vdev_file_hold,
.vdev_op_rele = vdev_file_rele,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DISK, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
#endif
ZFS_MODULE_PARAM(zfs_vdev_file, vdev_file_, logical_ashift, ULONG, ZMOD_RW,
"Logical ashift for file-based devices");
ZFS_MODULE_PARAM(zfs_vdev_file, vdev_file_, physical_ashift, ULONG, ZMOD_RW,
"Physical ashift for file-based devices");
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_racct.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_racct.c
index 7897e0f9edc1..ce623ef9d185 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_racct.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_racct.c
@@ -1,36 +1,38 @@
/*
* Copyright (c) 2021 iXsystems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/zfs_racct.h>
void
zfs_racct_read(uint64_t size, uint64_t iops)
{
+ (void) size, (void) iops;
}
void
zfs_racct_write(uint64_t size, uint64_t iops)
{
+ (void) size, (void) iops;
}
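/*
 * Descriptive note: the "(void) size, (void) iops;" statements above replace
 * the old ARGSUSED lint annotation; casting each unused parameter to void
 * keeps these Linux stubs quiet under unused-parameter warnings without
 * changing behavior.
 */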
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
index 5d672af0e8aa..469b82efeeb7 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vfsops.c
@@ -1,2180 +1,2185 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/pathname.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/mntent.h>
#include <sys/cmn_err.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/sunddi.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/spa_boot.h>
#include <sys/objlist.h>
#include <sys/zpl.h>
#include <linux/vfs_compat.h>
#include "zfs_comutil.h"
enum {
TOKEN_RO,
TOKEN_RW,
TOKEN_SETUID,
TOKEN_NOSETUID,
TOKEN_EXEC,
TOKEN_NOEXEC,
TOKEN_DEVICES,
TOKEN_NODEVICES,
TOKEN_DIRXATTR,
TOKEN_SAXATTR,
TOKEN_XATTR,
TOKEN_NOXATTR,
TOKEN_ATIME,
TOKEN_NOATIME,
TOKEN_RELATIME,
TOKEN_NORELATIME,
TOKEN_NBMAND,
TOKEN_NONBMAND,
TOKEN_MNTPOINT,
TOKEN_LAST,
};
static const match_table_t zpl_tokens = {
{ TOKEN_RO, MNTOPT_RO },
{ TOKEN_RW, MNTOPT_RW },
{ TOKEN_SETUID, MNTOPT_SETUID },
{ TOKEN_NOSETUID, MNTOPT_NOSETUID },
{ TOKEN_EXEC, MNTOPT_EXEC },
{ TOKEN_NOEXEC, MNTOPT_NOEXEC },
{ TOKEN_DEVICES, MNTOPT_DEVICES },
{ TOKEN_NODEVICES, MNTOPT_NODEVICES },
{ TOKEN_DIRXATTR, MNTOPT_DIRXATTR },
{ TOKEN_SAXATTR, MNTOPT_SAXATTR },
{ TOKEN_XATTR, MNTOPT_XATTR },
{ TOKEN_NOXATTR, MNTOPT_NOXATTR },
{ TOKEN_ATIME, MNTOPT_ATIME },
{ TOKEN_NOATIME, MNTOPT_NOATIME },
{ TOKEN_RELATIME, MNTOPT_RELATIME },
{ TOKEN_NORELATIME, MNTOPT_NORELATIME },
{ TOKEN_NBMAND, MNTOPT_NBMAND },
{ TOKEN_NONBMAND, MNTOPT_NONBMAND },
{ TOKEN_MNTPOINT, MNTOPT_MNTPOINT "=%s" },
{ TOKEN_LAST, NULL },
};
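/*
 * Illustrative only: an option string such as "noatime,saxattr" arriving via
 * mount(2) is split on commas by zfsvfs_parse_options() below; match_token()
 * maps the pieces to TOKEN_NOATIME and TOKEN_SAXATTR, which in turn set
 * vfs_atime/vfs_do_atime and vfs_xattr/vfs_do_xattr on the vfs_t.
 */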
static void
zfsvfs_vfs_free(vfs_t *vfsp)
{
if (vfsp != NULL) {
if (vfsp->vfs_mntpoint != NULL)
kmem_strfree(vfsp->vfs_mntpoint);
kmem_free(vfsp, sizeof (vfs_t));
}
}
static int
zfsvfs_parse_option(char *option, int token, substring_t *args, vfs_t *vfsp)
{
switch (token) {
case TOKEN_RO:
vfsp->vfs_readonly = B_TRUE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_RW:
vfsp->vfs_readonly = B_FALSE;
vfsp->vfs_do_readonly = B_TRUE;
break;
case TOKEN_SETUID:
vfsp->vfs_setuid = B_TRUE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_NOSETUID:
vfsp->vfs_setuid = B_FALSE;
vfsp->vfs_do_setuid = B_TRUE;
break;
case TOKEN_EXEC:
vfsp->vfs_exec = B_TRUE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_NOEXEC:
vfsp->vfs_exec = B_FALSE;
vfsp->vfs_do_exec = B_TRUE;
break;
case TOKEN_DEVICES:
vfsp->vfs_devices = B_TRUE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_NODEVICES:
vfsp->vfs_devices = B_FALSE;
vfsp->vfs_do_devices = B_TRUE;
break;
case TOKEN_DIRXATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_SAXATTR:
vfsp->vfs_xattr = ZFS_XATTR_SA;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_XATTR:
vfsp->vfs_xattr = ZFS_XATTR_DIR;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_NOXATTR:
vfsp->vfs_xattr = ZFS_XATTR_OFF;
vfsp->vfs_do_xattr = B_TRUE;
break;
case TOKEN_ATIME:
vfsp->vfs_atime = B_TRUE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_NOATIME:
vfsp->vfs_atime = B_FALSE;
vfsp->vfs_do_atime = B_TRUE;
break;
case TOKEN_RELATIME:
vfsp->vfs_relatime = B_TRUE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NORELATIME:
vfsp->vfs_relatime = B_FALSE;
vfsp->vfs_do_relatime = B_TRUE;
break;
case TOKEN_NBMAND:
vfsp->vfs_nbmand = B_TRUE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_NONBMAND:
vfsp->vfs_nbmand = B_FALSE;
vfsp->vfs_do_nbmand = B_TRUE;
break;
case TOKEN_MNTPOINT:
vfsp->vfs_mntpoint = match_strdup(&args[0]);
if (vfsp->vfs_mntpoint == NULL)
return (SET_ERROR(ENOMEM));
break;
default:
break;
}
return (0);
}
/*
* Parse the raw mntopts and return a vfs_t describing the options.
*/
static int
zfsvfs_parse_options(char *mntopts, vfs_t **vfsp)
{
vfs_t *tmp_vfsp;
int error;
tmp_vfsp = kmem_zalloc(sizeof (vfs_t), KM_SLEEP);
if (mntopts != NULL) {
substring_t args[MAX_OPT_ARGS];
char *tmp_mntopts, *p, *t;
int token;
tmp_mntopts = t = kmem_strdup(mntopts);
if (tmp_mntopts == NULL)
return (SET_ERROR(ENOMEM));
while ((p = strsep(&t, ",")) != NULL) {
if (!*p)
continue;
args[0].to = args[0].from = NULL;
token = match_token(p, zpl_tokens, args);
error = zfsvfs_parse_option(p, token, args, tmp_vfsp);
if (error) {
kmem_strfree(tmp_mntopts);
zfsvfs_vfs_free(tmp_vfsp);
return (error);
}
}
kmem_strfree(tmp_mntopts);
}
*vfsp = tmp_vfsp;
return (0);
}
boolean_t
zfs_is_readonly(zfsvfs_t *zfsvfs)
{
return (!!(zfsvfs->z_sb->s_flags & SB_RDONLY));
}
/*ARGSUSED*/
int
zfs_sync(struct super_block *sb, int wait, cred_t *cr)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
/*
* Semantically, the only requirement is that the sync be initiated.
* The DMU syncs out txgs frequently, so there's nothing to do.
*/
if (!wait)
return (0);
if (zfsvfs != NULL) {
/*
* Sync a specific filesystem.
*/
dsl_pool_t *dp;
ZFS_ENTER(zfsvfs);
dp = dmu_objset_pool(zfsvfs->z_os);
/*
* If the system is shutting down, then skip any
* filesystems which may exist on a suspended pool.
*/
if (spa_suspended(dp->dp_spa)) {
ZFS_EXIT(zfsvfs);
return (0);
}
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, 0);
ZFS_EXIT(zfsvfs);
} else {
/*
* Sync all ZFS filesystems. This is what happens when you
* run sync(1). Unlike other filesystems, ZFS honors the
* request by waiting for all pools to commit all dirty data.
*/
spa_sync_allpools();
}
return (0);
}
static void
atime_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
/*
* Update SB_NOATIME bit in VFS super block. Since atime update is
* determined by atime_needs_update(), atime_needs_update() needs to
* return false if atime is turned off, and not unconditionally return
* false if atime is turned on.
*/
if (newval)
sb->s_flags &= ~SB_NOATIME;
else
sb->s_flags |= SB_NOATIME;
}
static void
relatime_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_relatime = newval;
}
static void
xattr_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
if (newval == ZFS_XATTR_OFF) {
zfsvfs->z_flags &= ~ZSB_XATTR;
} else {
zfsvfs->z_flags |= ZSB_XATTR;
if (newval == ZFS_XATTR_SA)
zfsvfs->z_xattr_sa = B_TRUE;
else
zfsvfs->z_xattr_sa = B_FALSE;
}
}
static void
acltype_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
switch (newval) {
case ZFS_ACLTYPE_NFSV4:
case ZFS_ACLTYPE_OFF:
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
break;
case ZFS_ACLTYPE_POSIX:
#ifdef CONFIG_FS_POSIX_ACL
zfsvfs->z_acl_type = ZFS_ACLTYPE_POSIX;
zfsvfs->z_sb->s_flags |= SB_POSIXACL;
#else
zfsvfs->z_acl_type = ZFS_ACLTYPE_OFF;
zfsvfs->z_sb->s_flags &= ~SB_POSIXACL;
#endif /* CONFIG_FS_POSIX_ACL */
break;
default:
break;
}
}
static void
blksz_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
ASSERT3U(newval, <=, spa_maxblocksize(dmu_objset_spa(zfsvfs->z_os)));
ASSERT3U(newval, >=, SPA_MINBLOCKSIZE);
ASSERT(ISP2(newval));
zfsvfs->z_max_blksz = newval;
}
static void
readonly_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval)
sb->s_flags |= SB_RDONLY;
else
sb->s_flags &= ~SB_RDONLY;
}
static void
devices_changed_cb(void *arg, uint64_t newval)
{
}
static void
setuid_changed_cb(void *arg, uint64_t newval)
{
}
static void
exec_changed_cb(void *arg, uint64_t newval)
{
}
static void
nbmand_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
struct super_block *sb = zfsvfs->z_sb;
if (sb == NULL)
return;
if (newval == TRUE)
sb->s_flags |= SB_MANDLOCK;
else
sb->s_flags &= ~SB_MANDLOCK;
}
static void
snapdir_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_show_ctldir = newval;
}
static void
vscan_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_vscan = newval;
}
static void
acl_mode_changed_cb(void *arg, uint64_t newval)
{
zfsvfs_t *zfsvfs = arg;
zfsvfs->z_acl_mode = newval;
}
static void
acl_inherit_changed_cb(void *arg, uint64_t newval)
{
((zfsvfs_t *)arg)->z_acl_inherit = newval;
}
static int
zfs_register_callbacks(vfs_t *vfsp)
{
struct dsl_dataset *ds = NULL;
objset_t *os = NULL;
zfsvfs_t *zfsvfs = NULL;
int error = 0;
ASSERT(vfsp);
zfsvfs = vfsp->vfs_data;
ASSERT(zfsvfs);
os = zfsvfs->z_os;
/*
* The act of registering our callbacks will destroy any mount
* options we may have. In order to enable temporary overrides
* of mount options, we stash away the current values and
* restore them after we register the callbacks.
*/
if (zfs_is_readonly(zfsvfs) || !spa_writeable(dmu_objset_spa(os))) {
vfsp->vfs_do_readonly = B_TRUE;
vfsp->vfs_readonly = B_TRUE;
}
/*
* Register property callbacks.
*
* It would probably be fine to just check for i/o error from
* the first prop_register(), but I guess I like to go
* overboard...
*/
ds = dmu_objset_ds(os);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
error = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ATIME), atime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RELATIME), relatime_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_XATTR), xattr_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_READONLY), readonly_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEVICES), devices_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SETUID), setuid_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_EXEC), exec_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SNAPDIR), snapdir_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLTYPE), acltype_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLMODE), acl_mode_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_ACLINHERIT), acl_inherit_changed_cb,
zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_VSCAN), vscan_changed_cb, zfsvfs);
error = error ? error : dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_NBMAND), nbmand_changed_cb, zfsvfs);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (error)
goto unregister;
/*
* Invoke our callbacks to restore temporary mount options.
*/
if (vfsp->vfs_do_readonly)
readonly_changed_cb(zfsvfs, vfsp->vfs_readonly);
if (vfsp->vfs_do_setuid)
setuid_changed_cb(zfsvfs, vfsp->vfs_setuid);
if (vfsp->vfs_do_exec)
exec_changed_cb(zfsvfs, vfsp->vfs_exec);
if (vfsp->vfs_do_devices)
devices_changed_cb(zfsvfs, vfsp->vfs_devices);
if (vfsp->vfs_do_xattr)
xattr_changed_cb(zfsvfs, vfsp->vfs_xattr);
if (vfsp->vfs_do_atime)
atime_changed_cb(zfsvfs, vfsp->vfs_atime);
if (vfsp->vfs_do_relatime)
relatime_changed_cb(zfsvfs, vfsp->vfs_relatime);
if (vfsp->vfs_do_nbmand)
nbmand_changed_cb(zfsvfs, vfsp->vfs_nbmand);
return (0);
unregister:
dsl_prop_unregister_all(ds, zfsvfs);
return (error);
}
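/*
 * Descriptive note: the "error = error ? error : dsl_prop_register(...)"
 * chain above short-circuits after the first failure, so later registrations
 * are skipped entirely and the single dsl_prop_unregister_all() on the
 * unregister path is enough to undo whatever did get registered.
 */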
/*
* Takes a dataset, a property, a value and that value's setpoint as
* found in the ZAP. Checks if the property has been changed in the vfs.
* If so, val and setpoint will be overwritten with updated content.
* Otherwise, they are left unchanged.
*/
int
zfs_get_temporary_prop(dsl_dataset_t *ds, zfs_prop_t zfs_prop, uint64_t *val,
char *setpoint)
{
int error;
zfsvfs_t *zfvp;
vfs_t *vfsp;
objset_t *os;
uint64_t tmp = *val;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS)
return (EINVAL);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (zfvp == NULL)
return (ESRCH);
vfsp = zfvp->z_vfs;
switch (zfs_prop) {
case ZFS_PROP_ATIME:
if (vfsp->vfs_do_atime)
tmp = vfsp->vfs_atime;
break;
case ZFS_PROP_RELATIME:
if (vfsp->vfs_do_relatime)
tmp = vfsp->vfs_relatime;
break;
case ZFS_PROP_DEVICES:
if (vfsp->vfs_do_devices)
tmp = vfsp->vfs_devices;
break;
case ZFS_PROP_EXEC:
if (vfsp->vfs_do_exec)
tmp = vfsp->vfs_exec;
break;
case ZFS_PROP_SETUID:
if (vfsp->vfs_do_setuid)
tmp = vfsp->vfs_setuid;
break;
case ZFS_PROP_READONLY:
if (vfsp->vfs_do_readonly)
tmp = vfsp->vfs_readonly;
break;
case ZFS_PROP_XATTR:
if (vfsp->vfs_do_xattr)
tmp = vfsp->vfs_xattr;
break;
case ZFS_PROP_NBMAND:
if (vfsp->vfs_do_nbmand)
tmp = vfsp->vfs_nbmand;
break;
default:
return (ENOENT);
}
if (tmp != *val) {
(void) strcpy(setpoint, "temporary");
*val = tmp;
}
return (0);
}
/*
* Associate this zfsvfs with the given objset, which must be owned.
* This will cache a bunch of on-disk state from the objset in the
* zfsvfs.
*/
static int
zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
uint64_t val;
zfsvfs->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
zfsvfs->z_os = os;
error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zfsvfs->z_version);
if (error != 0)
return (error);
if (zfsvfs->z_version >
zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
(void) printk("Can't mount a version %lld file system "
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.\n", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
return (SET_ERROR(ENOTSUP));
}
error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &val);
if (error != 0)
return (error);
zfsvfs->z_norm = (int)val;
error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &val);
if (error != 0)
return (error);
zfsvfs->z_utf8 = (val != 0);
error = zfs_get_zplprop(os, ZFS_PROP_CASE, &val);
if (error != 0)
return (error);
zfsvfs->z_case = (uint_t)val;
if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &val)) != 0)
return (error);
zfsvfs->z_acl_type = (uint_t)val;
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
zfsvfs->z_case == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
uint64_t sa_obj = 0;
if (zfsvfs->z_use_sa) {
/* should either have both of these objects or none */
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
&sa_obj);
if (error != 0)
return (error);
error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &val);
if ((error == 0) && (val == ZFS_XATTR_SA))
zfsvfs->z_xattr_sa = B_TRUE;
}
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
&zfsvfs->z_root);
if (error != 0)
return (error);
ASSERT(zfsvfs->z_root != 0);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&zfsvfs->z_unlinkedobj);
if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
8, 1, &zfsvfs->z_userquota_obj);
if (error == ENOENT)
zfsvfs->z_userquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
8, 1, &zfsvfs->z_groupquota_obj);
if (error == ENOENT)
zfsvfs->z_groupquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA],
8, 1, &zfsvfs->z_projectquota_obj);
if (error == ENOENT)
zfsvfs->z_projectquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA],
8, 1, &zfsvfs->z_userobjquota_obj);
if (error == ENOENT)
zfsvfs->z_userobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA],
8, 1, &zfsvfs->z_groupobjquota_obj);
if (error == ENOENT)
zfsvfs->z_groupobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ,
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTOBJQUOTA],
8, 1, &zfsvfs->z_projectobjquota_obj);
if (error == ENOENT)
zfsvfs->z_projectobjquota_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
&zfsvfs->z_fuid_obj);
if (error == ENOENT)
zfsvfs->z_fuid_obj = 0;
else if (error != 0)
return (error);
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
&zfsvfs->z_shares_dir);
if (error == ENOENT)
zfsvfs->z_shares_dir = 0;
else if (error != 0)
return (error);
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
if (error != 0)
return (error);
if (zfsvfs->z_version >= ZPL_VERSION_SA)
sa_register_update_callback(os, zfs_sa_upgrade);
return (0);
}
int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
objset_t *os;
zfsvfs_t *zfsvfs;
int error;
boolean_t ro = (readonly || (strchr(osname, '@') != NULL));
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
error = dmu_objset_own(osname, DMU_OST_ZFS, ro, B_TRUE, zfsvfs, &os);
if (error != 0) {
kmem_free(zfsvfs, sizeof (zfsvfs_t));
return (error);
}
error = zfsvfs_create_impl(zfvp, zfsvfs, os);
if (error != 0) {
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
return (error);
}
/*
* Note: zfsvfs is assumed to be malloc'd, and will be freed by this function
* on a failure. Do not pass in a statically allocated zfsvfs.
*/
int
zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
{
int error;
zfsvfs->z_vfs = NULL;
zfsvfs->z_sb = NULL;
zfsvfs->z_parent = zfsvfs;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
ZFS_TEARDOWN_INIT(zfsvfs);
rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
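/*
* Size the znode hold arrays: round zfs_object_mutex_size down to a
* power of two, capped at ZFS_OBJ_MTX_MAX.
*/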
int size = MIN(1 << (highbit64(zfs_object_mutex_size) - 1),
ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (int i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
error = zfsvfs_init(zfsvfs, os);
if (error != 0) {
*zfvp = NULL;
zfsvfs_free(zfsvfs);
return (error);
}
zfsvfs->z_drain_task = TASKQID_INVALID;
zfsvfs->z_draining = B_FALSE;
zfsvfs->z_drain_cancel = B_TRUE;
*zfvp = zfsvfs;
return (0);
}
static int
zfsvfs_setup(zfsvfs_t *zfsvfs, boolean_t mounting)
{
int error;
boolean_t readonly = zfs_is_readonly(zfsvfs);
error = zfs_register_callbacks(zfsvfs->z_vfs);
if (error)
return (error);
zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
/*
* If we are not mounting (ie: online recv), then we don't
* have to worry about replaying the log as we blocked all
* operations out since we closed the ZIL.
*/
if (mounting) {
ASSERT3P(zfsvfs->z_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zfsvfs->z_kstat, zfsvfs->z_os);
/*
* During replay we remove the read only flag to
* allow replays to succeed.
*/
if (readonly != 0) {
readonly_changed_cb(zfsvfs, B_FALSE);
} else {
zap_stats_t zs;
if (zap_get_stats(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
&zs) == 0) {
dataset_kstats_update_nunlinks_kstat(
&zfsvfs->z_kstat, zs.zs_num_entries);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"num_entries in unlinked set: %llu",
zs.zs_num_entries);
}
zfs_unlinked_drain(zfsvfs);
dsl_dir_t *dd = zfsvfs->z_os->os_dsl_dataset->ds_dir;
dd->dd_activity_cancelled = B_FALSE;
}
/*
* Parse and replay the intent log.
*
* Because of ziltest, this must be done after
* zfs_unlinked_drain(). (Further note: ziltest
* doesn't use readonly mounts, where
* zfs_unlinked_drain() isn't called.) This is because
* ziltest causes spa_sync() to think it's committed,
* but actually it is not, so the intent log contains
* many txg's worth of changes.
*
* In particular, if object N is in the unlinked set in
* the last txg to actually sync, then it could be
* actually freed in a later txg and then reallocated
* in a yet later txg. This would write a "create
* object N" record to the intent log. Normally, this
* would be fine because the spa_sync() would have
* written out the fact that object N is free, before
* we could write the "create object N" intent log
* record.
*
* But when we are in ziltest mode, we advance the "open
* txg" without actually spa_sync()-ing the changes to
* disk. So we would see that object N is still
* allocated and in the unlinked set, and there is an
* intent log record saying to allocate it.
*/
if (spa_writeable(dmu_objset_spa(zfsvfs->z_os))) {
if (zil_replay_disable) {
zil_destroy(zfsvfs->z_log, B_FALSE);
} else {
zfsvfs->z_replay = B_TRUE;
zil_replay(zfsvfs->z_os, zfsvfs,
zfs_replay_vector);
zfsvfs->z_replay = B_FALSE;
}
}
/* restore readonly bit */
if (readonly != 0)
readonly_changed_cb(zfsvfs, B_TRUE);
}
/*
* Set the objset user_ptr to track its zfsvfs.
*/
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
return (0);
}
void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
int i, size = zfsvfs->z_hold_size;
zfs_fuid_destroy(zfsvfs);
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
ZFS_TEARDOWN_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_teardown_inactive_lock);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
zfsvfs_vfs_free(zfsvfs->z_vfs);
dataset_kstats_destroy(&zfsvfs->z_kstat);
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
static void
zfs_set_fuid_feature(zfsvfs_t *zfsvfs)
{
zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
zfsvfs->z_use_sa = USE_SA(zfsvfs->z_version, zfsvfs->z_os);
}
static void
zfs_unregister_callbacks(zfsvfs_t *zfsvfs)
{
objset_t *os = zfsvfs->z_os;
if (!dmu_objset_is_snapshot(os))
dsl_prop_unregister_all(dmu_objset_ds(os), zfsvfs);
}
#ifdef HAVE_MLSLABEL
/*
* Check that the hex label string is appropriate for the dataset being
* mounted into the global_zone proper.
*
* Return an error if the hex label string is not default or
* admin_low/admin_high. For admin_low labels, the corresponding
* dataset must be readonly.
*/
int
zfs_check_global_label(const char *dsname, const char *hexsl)
{
if (strcasecmp(hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_HIGH) == 0)
return (0);
if (strcasecmp(hexsl, ADMIN_LOW) == 0) {
/* must be readonly */
uint64_t rdonly;
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (SET_ERROR(EACCES));
return (rdonly ? 0 : SET_ERROR(EACCES));
}
return (SET_ERROR(EACCES));
}
#endif /* HAVE_MLSLABEL */
static int
zfs_statfs_project(zfsvfs_t *zfsvfs, znode_t *zp, struct kstatfs *statp,
uint32_t bshift)
{
char buf[20 + DMU_OBJACCT_PREFIX_LEN];
uint64_t offset = DMU_OBJACCT_PREFIX_LEN;
uint64_t quota;
uint64_t used;
int err;
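/*
* Build the accounting key: DMU_OBJACCT_PREFIX followed by the project
* id string. The quota ZAPs are keyed by the bare id (buf + offset);
* DMU_PROJECTUSED_OBJECT uses the bare id for space and the prefixed
* key (buf) for object counts.
*/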
strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
err = zfs_id_to_fuidstr(zfsvfs, NULL, zp->z_projid, buf + offset,
sizeof (buf) - offset, B_FALSE);
if (err)
return (err);
if (zfsvfs->z_projectquota_obj == 0)
goto objs;
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
goto objs;
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf + offset, 8, 1, &used);
if (unlikely(err == ENOENT)) {
uint32_t blksize;
u_longlong_t nblocks;
/*
* Quota accounting is async, so a race with it is possible here.
* There is at least one object with the given project ID.
*/
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
if (unlikely(zp->z_blksz == 0))
blksize = zfsvfs->z_max_blksz;
used = blksize * nblocks;
} else if (err) {
return (err);
}
statp->f_blocks = quota >> bshift;
statp->f_bfree = (quota > used) ? ((quota - used) >> bshift) : 0;
statp->f_bavail = statp->f_bfree;
objs:
if (zfsvfs->z_projectobjquota_obj == 0)
return (0);
err = zap_lookup(zfsvfs->z_os, zfsvfs->z_projectobjquota_obj,
buf + offset, 8, 1, &quota);
if (err == ENOENT)
return (0);
else if (err)
return (err);
err = zap_lookup(zfsvfs->z_os, DMU_PROJECTUSED_OBJECT,
buf, 8, 1, &used);
if (unlikely(err == ENOENT)) {
/*
* Quota accounting is async, so a race with it is possible here.
* There is at least one object with the given project ID.
*/
used = 1;
} else if (err) {
return (err);
}
statp->f_files = quota;
statp->f_ffree = (quota > used) ? (quota - used) : 0;
return (0);
}
int
zfs_statvfs(struct inode *ip, struct kstatfs *statp)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t refdbytes, availbytes, usedobjs, availobjs;
int err = 0;
ZFS_ENTER(zfsvfs);
dmu_objset_space(zfsvfs->z_os,
&refdbytes, &availbytes, &usedobjs, &availobjs);
uint64_t fsid = dmu_objset_fsid_guid(zfsvfs->z_os);
/*
* The underlying storage pool actually uses multiple block
* sizes. Under Solaris frsize (fragment size) is reported as
* the smallest block size we support, and bsize (block size)
* as the filesystem's maximum block size. Unfortunately,
* under Linux the fragment size and block size are often used
* interchangeably. Thus we are forced to report both of them
* as the filesystem's maximum block size.
*/
statp->f_frsize = zfsvfs->z_max_blksz;
statp->f_bsize = zfsvfs->z_max_blksz;
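/* bshift is log2(f_bsize), used below to convert bytes to blocks. */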
uint32_t bshift = fls(statp->f_bsize) - 1;
/*
* The following report "total" blocks of various kinds in
* the file system, but reported in terms of f_bsize - the
* "preferred" size.
*/
/* Round up so we never have a filesystem using 0 blocks. */
refdbytes = P2ROUNDUP(refdbytes, statp->f_bsize);
statp->f_blocks = (refdbytes + availbytes) >> bshift;
statp->f_bfree = availbytes >> bshift;
statp->f_bavail = statp->f_bfree; /* no root reservation */
/*
* statvfs() should really be called statufs(), because it assumes
* static metadata. ZFS doesn't preallocate files, so the best
* we can do is report the max that could possibly fit in f_files,
* and that minus the number actually used in f_ffree.
* For f_ffree, report the smaller of the number of objects available
* and the number of blocks (each object will take at least a block).
*/
statp->f_ffree = MIN(availobjs, availbytes >> DNODE_SHIFT);
statp->f_files = statp->f_ffree + usedobjs;
statp->f_fsid.val[0] = (uint32_t)fsid;
statp->f_fsid.val[1] = (uint32_t)(fsid >> 32);
statp->f_type = ZFS_SUPER_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
/*
* We have all of 40 characters to stuff a string here.
* Is there anything useful we could/should provide?
*/
bzero(statp->f_spare, sizeof (statp->f_spare));
if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
dmu_objset_projectquota_present(zfsvfs->z_os)) {
znode_t *zp = ITOZ(ip);
if (zp->z_pflags & ZFS_PROJINHERIT && zp->z_projid &&
zpl_is_valid_projid(zp->z_projid))
err = zfs_statfs_project(zfsvfs, zp, statp, bshift);
}
ZFS_EXIT(zfsvfs);
return (err);
}
static int
zfs_root(zfsvfs_t *zfsvfs, struct inode **ipp)
{
znode_t *rootzp;
int error;
ZFS_ENTER(zfsvfs);
error = zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp);
if (error == 0)
*ipp = ZTOI(rootzp);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Linux kernels older than 3.1 do not support a per-filesystem shrinker.
* To accommodate this we must improvise and manually walk the list of znodes
* attempting to prune dentries in order to be able to drop the inodes.
*
* To avoid scanning the same znodes multiple times they are always rotated
* to the end of the z_all_znodes list. New znodes are inserted at the
* end of the list so we're always scanning the oldest znodes first.
*/
static int
zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
{
znode_t **zp_array, *zp;
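/*
* Bound each pass by nr_to_scan and by roughly eight pages worth of
* znode pointers.
*/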
int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
int objects = 0;
int i = 0, j = 0;
zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
mutex_enter(&zfsvfs->z_znodes_lock);
while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
if ((i++ > nr_to_scan) || (j >= max_array))
break;
ASSERT(list_link_active(&zp->z_link_node));
list_remove(&zfsvfs->z_all_znodes, zp);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
/* Skip active znodes and .zfs entries */
if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
continue;
if (igrab(ZTOI(zp)) == NULL)
continue;
zp_array[j] = zp;
j++;
}
mutex_exit(&zfsvfs->z_znodes_lock);
for (i = 0; i < j; i++) {
zp = zp_array[i];
ASSERT3P(zp, !=, NULL);
d_prune_aliases(ZTOI(zp));
if (atomic_read(&ZTOI(zp)->i_count) == 1)
objects++;
zrele(zp);
}
kmem_free(zp_array, max_array * sizeof (znode_t *));
return (objects);
}
/*
* The ARC has requested that the filesystem drop entries from the dentry
* and inode caches. This can occur when the ARC needs to free meta data
* blocks but can't because they are all pinned by entries in these caches.
*/
int
zfs_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
int error = 0;
struct shrinker *shrinker = &sb->s_shrink;
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
.gfp_mask = GFP_KERNEL,
};
ZFS_ENTER(zfsvfs);
#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
defined(SHRINK_CONTROL_HAS_NID) && \
defined(SHRINKER_NUMA_AWARE)
if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
+ /*
+ * reset sc.nr_to_scan, modified by
+ * scan_objects == super_cache_scan
+ */
+ sc.nr_to_scan = nr_to_scan;
}
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SINGLE_SHRINKER_CALLBACK)
*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
#define D_PRUNE_ALIASES_IS_DEFAULT
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif
#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
/*
* Fall back to zfs_prune_aliases if the kernel's per-superblock
* shrinker couldn't free anything, possibly due to the inodes being
* allocated in a different memcg.
*/
if (*objects == 0)
*objects = zfs_prune_aliases(zfsvfs, nr_to_scan);
#endif
ZFS_EXIT(zfsvfs);
dprintf_ds(zfsvfs->z_os->os_dsl_dataset,
"pruning, nr_to_scan=%lu objects=%d error=%d\n",
nr_to_scan, *objects, error);
return (error);
}
/*
* Teardown the zfsvfs_t.
*
* Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
* and 'z_teardown_inactive_lock' held.
*/
static int
zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
{
znode_t *zp;
zfs_unlinked_drain_stop_wait(zfsvfs);
/*
* If someone has not already unmounted this file system,
* drain the zrele_taskq to ensure all active references to the
* zfsvfs_t have been handled; only then can it be safely destroyed.
*/
if (zfsvfs->z_os) {
/*
* If we're unmounting we have to wait for the list to
* drain completely.
*
* If we're not unmounting there's no guarantee the list
* will drain completely, but iputs run from the taskq
* may add the parents of dir-based xattrs to the taskq
* so we want to wait for these.
*
* We can safely read z_nr_znodes without locking because the
* VFS has already blocked operations which add to the
* z_all_znodes list and thus increment z_nr_znodes.
*/
int round = 0;
while (zfsvfs->z_nr_znodes > 0) {
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
if (++round > 1 && !unmounting)
break;
}
}
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
* We purge the parent filesystem's super block as the
* parent filesystem and all of its snapshots have their
* inode's super block set to the parent's filesystem's
* super block. Note, 'z_parent' is self referential
* for non-snapshots.
*/
shrink_dcache_sb(zfsvfs->z_parent->z_sb);
}
/*
* Close the zil. NB: Can't close the zil while zfs_inactive
* threads are blocked as zil_close can call zfs_inactive.
*/
if (zfsvfs->z_log) {
zil_close(zfsvfs->z_log);
zfsvfs->z_log = NULL;
}
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_WRITER);
/*
* If we are not unmounting (ie: online recv) and someone already
* unmounted this file system while we were doing the switcheroo,
* or a reopen of z_os failed then just bail out now.
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
/*
* At this point there are no VFS ops active, and any new VFS ops
* will fail with EIO since we have z_teardown_lock for writer (only
* relevant for forced unmount).
*
* Release all holds on dbufs. We also grab an extra reference to all
* the remaining inodes so that the kernel does not attempt to free
* any inodes of a suspended fs. This can cause deadlocks since the
* zfs_resume_fs() process may involve starting threads, which might
* attempt to free unreferenced inodes to free up memory for the new
* thread.
*/
if (!unmounting) {
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
if (igrab(ZTOI(zp)) != NULL)
zp->z_suspended = B_TRUE;
}
mutex_exit(&zfsvfs->z_znodes_lock);
}
/*
* If we are unmounting, set the unmounted flag and let new VFS ops
* unblock. zfs_inactive will have the unmounted behavior, and all
* other VFS ops will fail with EIO.
*/
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
* z_os will be NULL if there was an error in attempting to reopen
* zfsvfs, so just return as the properties had already been
* unregistered and cached data had been evicted before.
*/
if (zfsvfs->z_os == NULL)
return (0);
/*
* Unregister properties.
*/
zfs_unregister_callbacks(zfsvfs);
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
objset_t *os = zfsvfs->z_os;
boolean_t os_dirty = B_FALSE;
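/* The objset is dirty if any in-flight txg still has changes for it. */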
for (int t = 0; t < TXG_SIZE; t++) {
if (dmu_objset_is_dirty(os, t)) {
os_dirty = B_TRUE;
break;
}
}
if (!zfs_is_readonly(zfsvfs) && os_dirty) {
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
}
dmu_objset_evict_dbufs(zfsvfs->z_os);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
dsl_dir_cancel_waiters(dd);
return (0);
}
#if defined(HAVE_SUPER_SETUP_BDI_NAME)
atomic_long_t zfs_bdi_seq = ATOMIC_LONG_INIT(0);
#endif
int
zfs_domount(struct super_block *sb, zfs_mnt_t *zm, int silent)
{
const char *osname = zm->mnt_osname;
struct inode *root_inode = NULL;
uint64_t recordsize;
int error = 0;
zfsvfs_t *zfsvfs = NULL;
vfs_t *vfs = NULL;
ASSERT(zm);
ASSERT(osname);
error = zfsvfs_parse_options(zm->mnt_data, &vfs);
if (error)
return (error);
error = zfsvfs_create(osname, vfs->vfs_readonly, &zfsvfs);
if (error) {
zfsvfs_vfs_free(vfs);
goto out;
}
if ((error = dsl_prop_get_integer(osname, "recordsize",
&recordsize, NULL))) {
zfsvfs_vfs_free(vfs);
goto out;
}
vfs->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfs;
zfsvfs->z_sb = sb;
sb->s_fs_info = zfsvfs;
sb->s_magic = ZFS_SUPER_MAGIC;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
sb->s_blocksize = recordsize;
sb->s_blocksize_bits = ilog2(recordsize);
error = -zpl_bdi_setup(sb, "zfs");
if (error)
goto out;
sb->s_bdi->ra_pages = 0;
/* Set callback operations for the file system. */
sb->s_op = &zpl_super_operations;
sb->s_xattr = zpl_xattr_handlers;
sb->s_export_op = &zpl_export_operations;
sb->s_d_op = &zpl_dentry_operations;
/* Set features for file system. */
zfs_set_fuid_feature(zfsvfs);
if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
uint64_t pval;
atime_changed_cb(zfsvfs, B_FALSE);
readonly_changed_cb(zfsvfs, B_TRUE);
if ((error = dsl_prop_get_integer(osname,
"xattr", &pval, NULL)))
goto out;
xattr_changed_cb(zfsvfs, pval);
if ((error = dsl_prop_get_integer(osname,
"acltype", &pval, NULL)))
goto out;
acltype_changed_cb(zfsvfs, pval);
zfsvfs->z_issnap = B_TRUE;
zfsvfs->z_os->os_sync = ZFS_SYNC_DISABLED;
zfsvfs->z_snap_defer_time = jiffies;
mutex_enter(&zfsvfs->z_os->os_user_ptr_lock);
dmu_objset_set_user(zfsvfs->z_os, zfsvfs);
mutex_exit(&zfsvfs->z_os->os_user_ptr_lock);
} else {
if ((error = zfsvfs_setup(zfsvfs, B_TRUE)))
goto out;
}
/* Allocate a root inode for the filesystem. */
error = zfs_root(zfsvfs, &root_inode);
if (error) {
(void) zfs_umount(sb);
goto out;
}
/* Allocate a root dentry for the filesystem */
sb->s_root = d_make_root(root_inode);
if (sb->s_root == NULL) {
(void) zfs_umount(sb);
error = SET_ERROR(ENOMEM);
goto out;
}
if (!zfsvfs->z_issnap)
zfsctl_create(zfsvfs);
zfsvfs->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
if (error) {
if (zfsvfs != NULL) {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
/*
* make sure we don't have dangling sb->s_fs_info which
* zfs_preumount will use.
*/
sb->s_fs_info = NULL;
}
return (error);
}
/*
* Called when an unmount is requested and certain sanity checks have
* already passed. At this point no dentries or inodes have been reclaimed
* from their respective caches. We drop the extra reference on the .zfs
* control directory to allow everything to be reclaimed. All snapshots
* must already have been unmounted to reach this point.
*/
void
zfs_preumount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
/* zfsvfs is NULL when zfs_domount fails during mount */
if (zfsvfs) {
zfs_unlinked_drain_stop_wait(zfsvfs);
zfsctl_destroy(sb->s_fs_info);
/*
* Wait for zrele_async before entering evict_inodes in
* generic_shutdown_super. We must finish before evict_inodes
* because, when lazytime is on or when zfs_purgedir calls
* zfs_zget, zrele would bump i_count from 0 to 1. That would
* race with the i_count check in evict_inodes and could destroy
* the inode while we are still using it.
*
* We wait for two passes. xattr directories in the first pass
* may add xattr entries in zfs_purgedir, so in the second pass
* we wait for them. We don't use taskq_wait here because it is
* a pool wide taskq. Other mounted filesystems can constantly
* do zrele_async and there's no guarantee when taskq will be
* empty.
*/
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
taskq_wait_outstanding(dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)), 0);
}
}
/*
* Called once all other unmount-related teardown has occurred.
* It is our responsibility to release any remaining infrastructure.
*/
/*ARGSUSED*/
int
zfs_umount(struct super_block *sb)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
objset_t *os;
if (zfsvfs->z_arc_prune != NULL)
arc_remove_prune_callback(zfsvfs->z_arc_prune);
VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
os = zfsvfs->z_os;
zpl_bdi_destroy(sb);
/*
* z_os will be NULL if there was an error in
* attempting to reopen zfsvfs.
*/
if (os != NULL) {
/*
* Unset the objset user_ptr.
*/
mutex_enter(&os->os_user_ptr_lock);
dmu_objset_set_user(os, NULL);
mutex_exit(&os->os_user_ptr_lock);
/*
* Finally release the objset
*/
dmu_objset_disown(os, B_TRUE, zfsvfs);
}
zfsvfs_free(zfsvfs);
return (0);
}
int
zfs_remount(struct super_block *sb, int *flags, zfs_mnt_t *zm)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
vfs_t *vfsp;
boolean_t issnap = dmu_objset_is_snapshot(zfsvfs->z_os);
int error;
if ((issnap || !spa_writeable(dmu_objset_spa(zfsvfs->z_os))) &&
!(*flags & SB_RDONLY)) {
*flags |= SB_RDONLY;
return (EROFS);
}
error = zfsvfs_parse_options(zm->mnt_data, &vfsp);
if (error)
return (error);
if (!zfs_is_readonly(zfsvfs) && (*flags & SB_RDONLY))
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
zfs_unregister_callbacks(zfsvfs);
zfsvfs_vfs_free(zfsvfs->z_vfs);
vfsp->vfs_data = zfsvfs;
zfsvfs->z_vfs = vfsp;
if (!issnap)
(void) zfs_register_callbacks(vfsp);
return (error);
}
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
zfsvfs_t *zfsvfs = sb->s_fs_info;
znode_t *zp;
uint64_t object = 0;
uint64_t fid_gen = 0;
uint64_t gen_mask;
uint64_t zp_gen;
int i, err;
*ipp = NULL;
if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
zfid_short_t *zfid = (zfid_short_t *)fidp;
for (i = 0; i < sizeof (zfid->zf_object); i++)
object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);
for (i = 0; i < sizeof (zfid->zf_gen); i++)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
return (SET_ERROR(EINVAL));
}
/* LONG_FID_LEN means snapdirs */
if (fidp->fid_len == LONG_FID_LEN) {
zfid_long_t *zlfid = (zfid_long_t *)fidp;
uint64_t objsetid = 0;
uint64_t setgen = 0;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);
if (objsetid != ZFSCTL_INO_SNAPDIRS - object) {
dprintf("snapdir fid: objsetid (%llu) != "
"ZFSCTL_INO_SNAPDIRS (%llu) - object (%llu)\n",
objsetid, ZFSCTL_INO_SNAPDIRS, object);
return (SET_ERROR(EINVAL));
}
if (fid_gen > 1 || setgen != 0) {
dprintf("snapdir fid: fid_gen (%llu) and setgen "
"(%llu)\n", fid_gen, setgen);
return (SET_ERROR(EINVAL));
}
return (zfsctl_snapdir_vget(sb, objsetid, fid_gen, ipp));
}
ZFS_ENTER(zfsvfs);
/* A zero fid_gen means we are in the .zfs control directories */
if (fid_gen == 0 &&
(object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
*ipp = zfsvfs->z_ctldir;
ASSERT(*ipp != NULL);
if (object == ZFSCTL_INO_SNAPDIR) {
VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
0, kcred, NULL, NULL) == 0);
} else {
/*
* Must have an existing ref, so igrab()
* cannot return NULL
*/
VERIFY3P(igrab(*ipp), !=, NULL);
}
ZFS_EXIT(zfsvfs);
return (0);
}
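/*
* Here i == sizeof (zfid->zf_gen); mask the generation down to the
* bytes that are actually stored in the fid.
*/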
gen_mask = -1ULL >> (64 - 8 * i);
dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
if ((err = zfs_zget(zfsvfs, object, &zp))) {
ZFS_EXIT(zfsvfs);
return (err);
}
/* Don't export xattr stuff */
if (zp->z_pflags & ZFS_XATTR) {
zrele(zp);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOENT));
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (uint64_t));
zp_gen = zp_gen & gen_mask;
if (zp_gen == 0)
zp_gen = 1;
if ((fid_gen == 0) && (zfsvfs->z_root == object))
fid_gen = zp_gen;
if (zp->z_unlinked || zp_gen != fid_gen) {
dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
fid_gen);
zrele(zp);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOENT));
}
*ipp = ZTOI(zp);
if (*ipp)
zfs_znode_update_vfs(ITOZ(*ipp));
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Block out VFS ops and close zfsvfs_t
*
* Note, if successful, then we return with the 'z_teardown_lock' and
* 'z_teardown_inactive_lock' write held. We leave ownership of the underlying
* dataset and objset intact so that they can be atomically handed off during
* a subsequent rollback or recv operation and the resume thereafter.
*/
int
zfs_suspend_fs(zfsvfs_t *zfsvfs)
{
int error;
if ((error = zfsvfs_teardown(zfsvfs, B_FALSE)) != 0)
return (error);
return (0);
}
/*
* Rebuild SA and release VOPs. Note that ownership of the underlying dataset
* is an invariant across any of the operations that can be performed while the
* filesystem was suspended. Whether it succeeded or failed, the preconditions
* are the same: the relevant objset and associated dataset are owned by
* zfsvfs, held, and long held on entry.
*/
int
zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
int err, err2;
znode_t *zp;
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just update the objset_t, as the one we
* had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
err = zfsvfs_init(zfsvfs, os);
if (err != 0)
goto bail;
ds->ds_dir->dd_activity_cancelled = B_FALSE;
VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
zfs_set_fuid_feature(zfsvfs);
zfsvfs->z_rollback_time = jiffies;
/*
* Attempt to re-establish all the active inodes with their
* dbufs. If a zfs_rezget() fails, then we unhash the inode
* and mark it stale. This prevents a collision if a new
* inode/object is created which must use the same inode
* number. The stale inode will be released when the
* VFS prunes the dentry holding the remaining references
* on the stale inode.
*/
mutex_enter(&zfsvfs->z_znodes_lock);
for (zp = list_head(&zfsvfs->z_all_znodes); zp;
zp = list_next(&zfsvfs->z_all_znodes, zp)) {
err2 = zfs_rezget(zp);
if (err2) {
remove_inode_hash(ZTOI(zp));
zp->z_is_stale = B_TRUE;
}
/* see comment in zfs_suspend_fs() */
if (zp->z_suspended) {
zfs_zrele_async(zp);
zp->z_suspended = B_FALSE;
}
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (!zfs_is_readonly(zfsvfs) && !zfsvfs->z_unmounted) {
/*
* zfs_suspend_fs() could have interrupted freeing
* of dnodes. We need to restart this freeing so
* that we don't "leak" the space.
*/
zfs_unlinked_drain(zfsvfs);
}
/*
* Most of the time zfs_suspend_fs is used for changing the contents
* of the underlying dataset. ZFS rollback and receive operations
* might create files for which negative dentries are present in
* the cache. Since walking the dcache would require a lot of GPL-only
* code duplication, it's much easier on these rather rare occasions
* just to flush the whole dcache for the given dataset/filesystem.
*/
shrink_dcache_sb(zfsvfs->z_sb);
bail:
if (err != 0)
zfsvfs->z_unmounted = B_TRUE;
/* release the VFS ops */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err != 0) {
/*
* Since we couldn't setup the sa framework, try to force
* unmount this file system.
*/
if (zfsvfs->z_os)
(void) zfs_umount(zfsvfs->z_sb);
}
return (err);
}
/*
* Release VOPs and unmount a suspended filesystem.
*/
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
* We already own this, so just hold and rele it to update the
* objset_t, as the one we had before may have been evicted.
*/
objset_t *os;
VERIFY3P(ds->ds_owner, ==, zfsvfs);
VERIFY(dsl_dataset_long_held(ds));
dsl_pool_t *dp = spa_get_dsl(dsl_dataset_get_spa(ds));
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_config_exit(dp, FTAG);
zfsvfs->z_os = os;
/* release the VOPs */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.
*/
(void) zfs_umount(zfsvfs->z_sb);
zfsvfs->z_unmounted = B_TRUE;
return (0);
}
/*
* Automounted snapshots rely on periodic revalidation
* to defer their automatic unmounting.
*/
inline void
zfs_exit_fs(zfsvfs_t *zfsvfs)
{
if (!zfsvfs->z_issnap)
return;
if (time_after(jiffies, zfsvfs->z_snap_defer_time +
MAX(zfs_expire_snapshot * HZ / 2, HZ))) {
zfsvfs->z_snap_defer_time = jiffies;
zfsctl_snapshot_unmount_delay(zfsvfs->z_os->os_spa,
dmu_objset_id(zfsvfs->z_os),
zfs_expire_snapshot);
}
}
int
zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
{
int error;
objset_t *os = zfsvfs->z_os;
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (error);
}
error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &newvers, tx);
if (error) {
dmu_tx_commit(tx);
return (error);
}
if (newvers >= ZPL_VERSION_SA && !zfsvfs->z_use_sa) {
uint64_t sa_obj;
ASSERT3U(spa_version(dmu_objset_spa(zfsvfs->z_os)), >=,
SPA_VERSION_SA);
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, MASTER_NODE_OBJ,
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT0(error);
VERIFY(0 == sa_set_sa_object(os, sa_obj));
sa_register_update_callback(os, zfs_sa_upgrade);
}
spa_history_log_internal_ds(dmu_objset_ds(os), "upgrade", tx,
"from %llu to %llu", zfsvfs->z_version, newvers);
dmu_tx_commit(tx);
zfsvfs->z_version = newvers;
os->os_version = newvers;
zfs_set_fuid_feature(zfsvfs);
return (0);
}
/*
* Read a property stored within the master node.
*/
int
zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value)
{
uint64_t *cached_copy = NULL;
/*
* Figure out where in the objset_t the cached copy would live, if it
* is available for the requested property.
*/
if (os != NULL) {
switch (prop) {
case ZFS_PROP_VERSION:
cached_copy = &os->os_version;
break;
case ZFS_PROP_NORMALIZE:
cached_copy = &os->os_normalization;
break;
case ZFS_PROP_UTF8ONLY:
cached_copy = &os->os_utf8only;
break;
case ZFS_PROP_CASE:
cached_copy = &os->os_casesensitivity;
break;
default:
break;
}
}
if (cached_copy != NULL && *cached_copy != OBJSET_PROP_UNINITIALIZED) {
*value = *cached_copy;
return (0);
}
/*
* If the property wasn't cached, look up the file system's value for
* the property. For the version property, we look up a slightly
* different string.
*/
const char *pname;
int error = ENOENT;
if (prop == ZFS_PROP_VERSION)
pname = ZPL_VERSION_STR;
else
pname = zfs_prop_to_name(prop);
if (os != NULL) {
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
error = zap_lookup(os, MASTER_NODE_OBJ, pname, 8, 1, value);
}
if (error == ENOENT) {
/* No value set, use the default value */
switch (prop) {
case ZFS_PROP_VERSION:
*value = ZPL_VERSION;
break;
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
*value = 0;
break;
case ZFS_PROP_CASE:
*value = ZFS_CASE_SENSITIVE;
break;
case ZFS_PROP_ACLTYPE:
*value = ZFS_ACLTYPE_OFF;
break;
default:
return (error);
}
error = 0;
}
/*
* If one of the methods for getting the property value above worked,
* copy it into the objset_t's cache.
*/
if (error == 0 && cached_copy != NULL) {
*cached_copy = *value;
}
return (error);
}
/*
* Return true if the corresponding vfs's unmounted flag is set.
* Otherwise return false.
* If this function returns true we know VFS unmount has been initiated.
*/
boolean_t
zfs_get_vfs_flag_unmounted(objset_t *os)
{
zfsvfs_t *zfvp;
boolean_t unmounted = B_FALSE;
ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
mutex_enter(&os->os_user_ptr_lock);
zfvp = dmu_objset_get_user(os);
if (zfvp != NULL && zfvp->z_unmounted)
unmounted = B_TRUE;
mutex_exit(&os->os_user_ptr_lock);
return (unmounted);
}
/*ARGSUSED*/
void
zfsvfs_update_fromname(const char *oldname, const char *newname)
{
/*
* We don't need to do anything here, the devname is always current by
* virtue of zfsvfs->z_sb->s_op->show_devname.
*/
}
void
zfs_init(void)
{
zfsctl_init();
zfs_znode_init();
dmu_objset_register_type(DMU_OST_ZFS, zpl_get_file_info);
register_filesystem(&zpl_fs_type);
}
void
zfs_fini(void)
{
/*
* we don't use outstanding because zpl_posix_acl_free might add more.
*/
taskq_wait(system_delay_taskq);
taskq_wait(system_taskq);
unregister_filesystem(&zpl_fs_type);
zfs_znode_fini();
zfsctl_fini();
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_suspend_fs);
EXPORT_SYMBOL(zfs_resume_fs);
EXPORT_SYMBOL(zfs_set_version);
EXPORT_SYMBOL(zfsvfs_create);
EXPORT_SYMBOL(zfsvfs_free);
EXPORT_SYMBOL(zfs_is_readonly);
EXPORT_SYMBOL(zfs_domount);
EXPORT_SYMBOL(zfs_preumount);
EXPORT_SYMBOL(zfs_umount);
EXPORT_SYMBOL(zfs_remount);
EXPORT_SYMBOL(zfs_statvfs);
EXPORT_SYMBOL(zfs_vget);
EXPORT_SYMBOL(zfs_prune);
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
index a1a0e44bb31f..2958439ace82 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_vnops_os.c
@@ -1,4028 +1,4032 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
* This is done while avoiding races by using ZFS_ENTER(zfsvfs).
* A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
* must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) zrele() should always be the last thing except for zil_commit() (if
* necessary) and ZFS_EXIT(). This is for 3 reasons: First, if it's the
* last reference, the vnode/znode can be freed, so the zp may point to
* freed memory. Second, the last reference will call zfs_zinactive(),
* which may induce a lot of work -- pushing cached pages (which acquires
* range locks) and syncing out cached atime changes. Third,
* zfs_zinactive() may require a new tx, which could deadlock the system
* if you were already holding one. This deadlock occurs because the tx
* currently being operated on prevents a txg from syncing, which
* prevents the new tx from progressing, resulting in a deadlock. If you
* must call zrele() within a tx, use zfs_zrele_async(). Note that iput()
* is a synonym for zrele().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
* If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
* ZFS_ENTER(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* zrele(...); // release held znodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* zrele(...); // release held znodes
* zil_commit(zilog, foid); // synchronous when necessary
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
/*
* Virus scanning is unsupported. It would be possible to add a hook
* here to perform the required virus scan. This could be done
* entirely in the kernel or potentially as an update to invoke a
* scanning utility.
*/
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
return (0);
}
/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* Honor ZFS_APPENDONLY file attribute */
if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & O_APPEND) == 0)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/* Virus scan eligible files on open */
if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
if (zfs_vscan(ip, cr, 0) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
}
/* Keep a count of the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_inc_32(&zp->z_sync_cnt);
ZFS_EXIT(zfsvfs);
return (0);
}
/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* Decrement the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_dec_32(&zp->z_sync_cnt);
if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
VERIFY(zfs_vscan(ip, cr, 1) == 0);
ZFS_EXIT(zfsvfs);
return (0);
}
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
uint64_t nbytes;
int64_t off;
void *pb;
off = start & (PAGE_SIZE-1);
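/*
* Walk the range a page at a time; refresh any page present in the
* page cache from the DMU buffer that was just written.
*/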
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
nbytes = MIN(PAGE_SIZE - off, len);
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
pb + off, DMU_READ_PREFETCH);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
len -= nbytes;
off = 0;
}
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* else we default from the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
struct inode *ip = ZTOI(zp);
struct address_space *mp = ip->i_mapping;
struct page *pp;
int64_t start, off;
uint64_t bytes;
int len = nbytes;
int error = 0;
void *pb;
start = uio->uio_loffset;
off = start & (PAGE_SIZE-1);
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
bytes = MIN(PAGE_SIZE - off, len);
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
ASSERT(PageUptodate(pp));
unlock_page(pp);
pb = kmap(pp);
error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
put_page(pp);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
}
len -= bytes;
off = 0;
if (error)
break;
}
return (error);
}
#endif /* _KERNEL */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to
* data - bytes to write
* len - number of bytes to write
* pos - offset to start writing at
*
* OUT: resid - remaining bytes to write
*
* RETURN: 0 if success
* positive error code if failure. EIO is returned
* for a short write when residp isn't provided.
*
* Timestamps:
* zp - ctime|mtime updated if byte count > 0
*/
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *residp)
{
fstrans_cookie_t cookie;
int error;
struct iovec iov;
iov.iov_base = (void *)data;
iov.iov_len = len;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);
cookie = spl_fstrans_mark();
error = zfs_write(zp, &uio, 0, kcred);
spl_fstrans_unmark(cookie);
if (error == 0) {
if (residp != NULL)
*residp = zfs_uio_resid(&uio);
else if (zfs_uio_resid(&uio) != 0)
error = SET_ERROR(EIO);
}
return (error);
}
static void
zfs_rele_async_task(void *arg)
{
iput(arg);
}
void
zfs_zrele_async(znode_t *zp)
{
struct inode *ip = ZTOI(zp);
objset_t *os = ITOZSB(ip)->z_os;
ASSERT(atomic_read(&ip->i_count) > 0);
ASSERT(os != NULL);
/*
* If decrementing the count would put us at 0, we can't do it inline
* here, because that would be synchronous. Instead, dispatch an iput
* to run later.
*
* For more information on the dangers of a synchronous iput, see the
* header comment of this file.
*/
if (!atomic_add_unless(&ip->i_count, -1, 1)) {
VERIFY(taskq_dispatch(dsl_pool_zrele_taskq(dmu_objset_pool(os)),
zfs_rele_async_task, ip, TQ_SLEEP) != TASKQID_INVALID);
}
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held inode reference for it.
*
* IN: zdp - znode of directory to search.
* nm - name of entry to lookup.
* flags - LOOKUP_XATTR set if looking for an attribute.
* cr - credentials of caller.
* direntflags - directory lookup flags
* realpnp - returned pathname.
*
* OUT: zpp - znode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
/* ARGSUSED */
int
zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
int *direntflags, pathname_t *realpnp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zdp);
int error = 0;
/*
* Fast path lookup; however, we must skip the DNLC lookup
* for case folding or normalizing lookups because the
* DNLC code only stores the passed in name. This means
* creating 'a' and removing 'A' on a case insensitive
* file system would work, but DNLC still thinks 'a'
* exists and won't let you create it again on the next
* pass through fast path.
*/
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (!error) {
*zpp = zdp;
zhold(*zpp);
return (0);
}
return (error);
}
}
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zdp);
*zpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(zdp, zpp, cr, flags))) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Do we have permission to get into attribute directory?
*/
if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
B_FALSE, cr))) {
zrele(*zpp);
*zpp = NULL;
}
ZFS_EXIT(zfsvfs);
return (error);
}
if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTDIR));
}
/*
* Check accessibility of directory.
*/
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
error = zfs_dirlook(zdp, nm, zpp, flags, direntflags, realpnp);
if ((error == 0) && (*zpp))
zfs_znode_update_vfs(*zpp);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the ip of the created or trunc'd file.
*
* IN: dzp - znode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - file flag.
* vsecp - ACL to be set
*
* OUT: zpp - znode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dzp - ctime|mtime updated if new entry created
* zp - ctime|mtime always, atime if new
*/
/* ARGSUSED */
int
zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
objset_t *os;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
top:
*zpp = NULL;
if (*name == '\0') {
/*
* Null component name refers to the directory itself.
*/
zhold(dzp);
zp = dzp;
dl = NULL;
error = 0;
} else {
/* possible igrab(zp) */
int zflg = 0;
if (flag & FIGNORECASE)
zflg |= ZCILOOK;
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL);
if (error) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = SET_ERROR(EISDIR);
ZFS_EXIT(zfsvfs);
return (error);
}
}
if (zp == NULL) {
uint64_t txtype;
uint64_t projid = ZFS_DEFAULT_PROJID;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EINVAL);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx,
(waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
/*
* Since we failed to add the directory entry for it,
* delete the newly created dnode.
*/
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
goto out;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
if (flag & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
} else {
int aflags = (flag & O_APPEND) ? V_APPEND : 0;
if (have_acl)
zfs_acl_ids_free(&acl_ids);
have_acl = B_FALSE;
/*
* A directory entry already exists for this name.
*/
/*
* Can't truncate an existing file if in exclusive mode.
*/
if (excl) {
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Can't open a directory for writing.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
/*
* Verify requested access to file.
*/
if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
goto out;
}
mutex_enter(&dzp->z_lock);
dzp->z_seq++;
mutex_exit(&dzp->z_lock);
/*
* Truncate regular files if requested.
*/
if (S_ISREG(ZTOI(zp)->i_mode) &&
(vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
/* we can't hold any locks when calling zfs_freesp() */
if (dl) {
zfs_dirent_unlock(dl);
dl = NULL;
}
error = zfs_freesp(zp, 0, 0, mode, TRUE);
}
}
out:
if (dl)
zfs_dirent_unlock(dl);
if (error) {
if (zp)
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/* ARGSUSED */
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp = NULL, *dzp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
objset_t *os;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
uint64_t projid = ZFS_DEFAULT_PROJID;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
os = zfsvfs->z_os;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
top:
*ipp = NULL;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
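/* A tmpfile starts life on the unlinked set, so hold that ZAP too. */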
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/* Add to unlinked set */
zp->z_unlinked = B_TRUE;
zfs_unlinked_add(zp, tx);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
out:
if (error) {
if (zp)
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
*ipp = ZTOI(zp);
}
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dzp - znode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* flags - case flags.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dzp - ctime|mtime
* ip - ctime (if nlink > 0)
*/
uint64_t null_xattr = 0;
/*ARGSUSED*/
int
zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
{
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
uint64_t acl_obj, xattr_obj;
uint64_t xattr_obj_unlinked = 0;
uint64_t obj = 0;
uint64_t links;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
boolean_t may_delete_now, delete_now = FALSE;
boolean_t unlinked, toobig = FALSE;
uint64_t txtype;
pathname_t *realnmp = NULL;
pathname_t realnm;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE) {
zflg |= ZCILOOK;
pn_alloc(&realnm);
realnmp = &realnm;
}
top:
xattr_obj = 0;
xzp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, realnmp))) {
if (realnmp)
pn_free(realnmp);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EPERM);
goto out;
}
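/*
* Note whether we hold the only reference to the inode and it is not
* mmapped; only in that case can the unlink potentially destroy the
* znode within this transaction instead of deferring it.
*/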
mutex_enter(&zp->z_lock);
may_delete_now = atomic_read(&ZTOI(zp)->i_count) == 1 &&
!(zp->z_is_mapped);
mutex_exit(&zp->z_lock);
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the inode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
obj = zp->z_id;
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (may_delete_now) {
toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
/* if the file is too big, only hold_free a token amount */
dmu_tx_hold_free(tx, zp->z_id, 0,
(toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
}
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
mutex_enter(&zp->z_lock);
if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
mutex_exit(&zp->z_lock);
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(zp);
if (xzp)
zrele(xzp);
goto top;
}
if (realnmp)
pn_free(realnmp);
dmu_tx_abort(tx);
zrele(zp);
if (xzp)
zrele(xzp);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
/*
* Hold z_lock so that we can make sure that the ACL obj
* hasn't changed. Could have been deleted due to
* zfs_sa_upgrade().
*/
mutex_enter(&zp->z_lock);
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
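/*
* The object may be destroyed in this tx only if nothing changed
* while the tx was being assigned: we still hold the last inode
* reference, the file is not mmapped or too large, and the xattr
* and external ACL objects are the same ones we took holds on.
*/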
delete_now = may_delete_now && !toobig &&
atomic_read(&ZTOI(zp)->i_count) == 1 &&
!(zp->z_is_mapped) && xattr_obj == xattr_obj_unlinked &&
zfs_external_acl(zp) == acl_obj;
}
if (delete_now) {
if (xattr_obj_unlinked) {
ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = B_TRUE;
clear_nlink(ZTOI(xzp));
links = 0;
error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx);
ASSERT3U(error, ==, 0);
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
if (zp->z_is_sa)
error = sa_remove(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), tx);
else
error = sa_update(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), &null_xattr,
sizeof (uint64_t), tx);
ASSERT0(error);
}
/*
* Add to the unlinked set because a new reference could be
* taken concurrently resulting in a deferred destruction.
*/
zfs_unlinked_add(zp, tx);
mutex_exit(&zp->z_lock);
} else if (unlinked) {
mutex_exit(&zp->z_lock);
zfs_unlinked_add(zp, tx);
}
txtype = TX_REMOVE;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (realnmp)
pn_free(realnmp);
zfs_dirent_unlock(dl);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
if (delete_now)
zrele(zp);
else
zfs_zrele_async(zp);
if (xzp) {
zfs_znode_update_vfs(xzp);
zfs_zrele_async(xzp);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create a new directory and insert it into dzp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dzp - znode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* flags - case flags.
* vsecp - ACL to be set
*
* OUT: zpp - znode of created directory.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dzp - ctime|mtime updated
* zpp - ctime|mtime|atime updated
*/
/*ARGSUSED*/
int
zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
uint64_t txtype;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t waited = B_FALSE;
ASSERT(S_ISDIR(vap->va_mode));
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure the file system is at the proper version.
*/
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (dirname == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
vsecp, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST which can cause some applications
* to fail.
*/
top:
*zpp = NULL;
if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
NULL, NULL))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
/*
* Now put new name in parent dir.
*/
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
goto out;
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
acl_ids.z_fuidp, vap);
out:
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (error != 0) {
zrele(zp);
} else {
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
}
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dzp - znode of directory to remove from.
* name - name of directory to be removed.
* cwd - inode of current working directory.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dzp - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
int flags)
{
znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
zp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL))) {
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
if (!S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
if (zp == cwd) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* Grab a lock on the directory to make sure that no one is
* trying to add (or lookup) entries while we are removing it.
*/
rw_enter(&zp->z_name_lock, RW_WRITER);
/*
* Grab a lock on the parent pointer to make sure we play well
* with the treewalk and directory rename code.
*/
rw_enter(&zp->z_parent_lock, RW_WRITER);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(zp);
goto top;
}
dmu_tx_abort(tx);
zrele(zp);
ZFS_EXIT(zfsvfs);
return (error);
}
error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT,
B_FALSE);
}
dmu_tx_commit(tx);
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
out:
zfs_dirent_unlock(dl);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
zrele(zp);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Read directory entries from the given directory cursor position and emit
* name and position for each entry.
*
* IN: ip - inode of directory to read.
* ctx - directory entry context.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*
* Note that the low 4 bits of the cookie returned by zap are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
/* ARGSUSED */
int
zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
zap_cursor_t zc;
zap_attribute_t zap;
int error;
uint8_t prefetch;
uint8_t type;
int done = 0;
uint64_t parent;
uint64_t offset; /* must be unsigned; checks for < 1 */
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0)
goto out;
/*
* Quit if the directory has been removed (POSIX).
*/
if (zp->z_unlinked)
goto out;
error = 0;
os = zfsvfs->z_os;
offset = ctx->pos;
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Transform to file-system independent format
*/
while (!done) {
uint64_t objnum;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if (error == ENOENT)
break;
else
goto update;
}
/*
* Allow multiple entries provided the first entry is
* the object id. Non-zpl consumers may safely make
* use of the additional space.
*
* XXX: This should be a feature flag for compatibility
*/
if (zap.za_integer_length != 8 ||
zap.za_num_integers == 0) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld, "
"length = %d, num = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset,
zap.za_integer_length,
(u_longlong_t)zap.za_num_integers);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
}
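/*
* Hand the entry to the VFS.  A false return from zpl_dir_emit()
* means the caller's buffer cannot accept more entries, so stop.
*/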
done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
objnum, type);
if (done)
break;
/* Prefetch znode */
if (prefetch) {
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
}
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
ctx->pos = offset;
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
update:
zap_cursor_fini(&zc);
if (error == ENOENT)
error = 0;
out:
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Get the basic file attributes and place them in the provided kstat
* structure. The inode is assumed to be the authoritative source
* for most of the attributes. However, the znode currently has the
* authoritative atime, blksize, and block count.
*
* IN: ip - inode of file.
*
* OUT: sp - kstat values.
*
* RETURN: 0 (always succeeds)
*/
/* ARGSUSED */
int
zfs_getattr_fast(struct user_namespace *user_ns, struct inode *ip,
struct kstat *sp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t blksize;
u_longlong_t nblocks;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
mutex_enter(&zp->z_lock);
zpl_generic_fillattr(user_ns, ip, sp);
/*
* +1 link count for root inode with visible '.zfs' directory.
*/
if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
if (sp->nlink < ZFS_LINK_MAX)
sp->nlink++;
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
sp->blksize = blksize;
sp->blocks = nblocks;
if (unlikely(zp->z_blksz == 0)) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
sp->blksize = zfsvfs->z_max_blksz;
}
mutex_exit(&zp->z_lock);
/*
* Required to prevent NFS client from detecting different inode
* numbers of snapshot root dentry before and after snapshot mount.
*/
if (zfsvfs->z_issnap) {
if (ip->i_sb->s_root->d_inode == ip)
sp->ino = ZFSCTL_INO_SNAPDIRS -
dmu_objset_id(zfsvfs->z_os);
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* When changing a file's user/group/project, we need to handle not
* only the main object that is assigned to the file directly, but
* also the objects that the file uses via its hidden xattr directory.
*
* Because the xattr directory may contain many EA entries, it may be
* impossible to change all of them within the single transaction that
* changes the main object's user/group/project attributes. Instead we
* change them one by one via separate, independent transactions. This
* is not an ideal solution, but we have no better one yet.
*/
static int
zfs_setattr_dir(znode_t *dzp)
{
struct inode *dxip = ZTOI(dzp);
struct inode *xip = NULL;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
objset_t *os = zfsvfs->z_os;
zap_cursor_t zc;
zap_attribute_t zap;
zfs_dirlock_t *dl;
znode_t *zp = NULL;
dmu_tx_t *tx = NULL;
uint64_t uid, gid;
sa_bulk_attr_t bulk[4];
int count;
int err;
zap_cursor_init(&zc, os, dzp->z_id);
while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
count = 0;
if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
err = ENXIO;
break;
}
err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
ZEXISTS, NULL, NULL);
if (err == ENOENT)
goto next;
if (err)
break;
xip = ZTOI(zp);
if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
zp->z_projid == dzp->z_projid)
goto next;
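/*
* This xattr object's ownership or project ID differs from the
* parent's; bring it in line within its own small transaction.
*/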
tx = dmu_tx_create(os);
if (!(zp->z_pflags & ZFS_PROJID))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
break;
mutex_enter(&dzp->z_lock);
if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
xip->i_uid = dxip->i_uid;
uid = zfs_uid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&uid, sizeof (uid));
}
if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
xip->i_gid = dxip->i_gid;
gid = zfs_gid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&gid, sizeof (gid));
}
if (zp->z_projid != dzp->z_projid) {
if (!(zp->z_pflags & ZFS_PROJID)) {
zp->z_pflags |= ZFS_PROJID;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
sizeof (zp->z_pflags));
}
zp->z_projid = dzp->z_projid;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
NULL, &zp->z_projid, sizeof (zp->z_projid));
}
mutex_exit(&dzp->z_lock);
if (likely(count > 0)) {
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
tx = NULL;
if (err != 0 && err != ENOENT)
break;
next:
if (zp) {
zrele(zp);
zp = NULL;
zfs_dirent_unlock(dl);
}
zap_cursor_advance(&zc);
}
if (tx)
dmu_tx_abort(tx);
if (zp) {
zrele(zp);
zfs_dirent_unlock(dl);
}
zap_cursor_fini(&zc);
return (err == ENOENT ? 0 : err);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If ATTR_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime updated, mtime updated if size changed.
*/
/* ARGSUSED */
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
{
struct inode *ip;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t *tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2], atime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2 = 0;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
boolean_t handle_eadir = B_FALSE;
sa_bulk_attr_t *bulk, *xattr_bulk;
int count = 0, xattr_count = 0, bulks = 8;
if (mask == 0)
return (0);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
ip = ZTOI(zp);
/*
* If this is a xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
if (xoap != NULL && (mask & ATTR_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTSUP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTSUP));
}
}
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that the file system is at the proper version level.
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & ATTR_XVATTR))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EISDIR));
}
if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
xva_init(tmpxvattr);
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
/*
* For immutable files, only the immutable bit and atime may be altered.
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
err = SET_ERROR(EPERM);
goto out3;
}
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
err = SET_ERROR(EPERM);
goto out3;
}
/*
* Verify the timestamps don't overflow 32 bits.
* ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times greater than 2038. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (ATTR_ATIME | ATTR_MTIME)) {
if (((mask & ATTR_ATIME) &&
TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) &&
TIMESPEC_OVERFLOW(&vap->va_mtime))) {
err = SET_ERROR(EOVERFLOW);
goto out3;
}
}
top:
attrzp = NULL;
aclp = NULL;
/* Can this be moved to before the top label? */
if (zfs_is_readonly(zfsvfs)) {
err = SET_ERROR(EROFS);
goto out3;
}
/*
* First validate permissions
*/
if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
if (err)
goto out3;
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err)
goto out3;
}
if (mask & (ATTR_ATIME|ATTR_MTIME) ||
((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr);
}
if (mask & (ATTR_UID|ATTR_GID)) {
int idmask = (mask & (ATTR_UID|ATTR_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & ATTR_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & ATTR_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both ATTR_UID and ATTR_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (ATTR_UID|ATTR_GID)) &&
take_owner && take_group) ||
((idmask == ATTR_UID) && take_owner) ||
((idmask == ATTR_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
(void) secpolicy_setid_clear(vap, cr);
trim_mask = (mask & (ATTR_UID|ATTR_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
mutex_enter(&zp->z_lock);
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & ATTR_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
* The bits will be restored prior to actually setting
* the attributes, so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((!S_ISREG(ip->i_mode) &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
mutex_exit(&zp->z_lock);
err = SET_ERROR(EPERM);
goto out3;
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
mutex_exit(&zp->z_lock);
if (mask & ATTR_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(ip, vap,
&oldva, cr);
if (err)
goto out3;
trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then either take-ownership has been
* granted, or write_acl is present and the user has the
* ability to modify the mode. In that case remove
* UID|GID and/or MODE from the mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
}
err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err)
goto out3;
if (trim_mask)
vap->va_mask |= saved_mask;
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
mask = vap->va_mask;
if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
handle_eadir = B_TRUE;
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
if (err)
goto out2;
}
if (mask & ATTR_UID) {
new_kuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_kuid)) {
if (attrzp)
zrele(attrzp);
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & ATTR_GID) {
new_kgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_kgid)) {
if (attrzp)
zrele(attrzp);
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
zrele(attrzp);
err = EDQUOT;
goto out2;
}
}
tx = dmu_tx_create(os);
if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = EPERM;
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
mutex_enter(&zp->z_lock);
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
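/*
* The mode is unchanged.  The SA may still need room to grow if an
* AV scanstamp is being set or a project ID slot is being added, so
* take a hold that allows growth in those cases.
*/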
if (((mask & ATTR_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
* For an existing object that was upgraded from an old system,
* the on-disk layout has no slot for the project ID attribute.
* However, the quota accounting logic needs to access the related
* slots by offset directly, so we adjust the old object's layout
* to place the project ID at a unified, fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&attrzp->z_acl_lock);
mutex_enter(&attrzp->z_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (ATTR_UID|ATTR_GID)) {
if (mask & ATTR_UID) {
ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
new_uid = zfs_uid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
}
}
if (mask & ATTR_GID) {
ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
new_gid = zfs_gid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
}
}
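/*
* If the mode is not being changed, carry the current mode along in
* the bulk update.  SA_ADD_BULK_ATTR() only records a pointer, so
* assigning new_mode after registering it is fine; the value is read
* later by sa_bulk_update().
*/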
if (!(mask & ATTR_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT(err == 0);
if (attrzp) {
err = zfs_acl_chown_setattr(attrzp);
ASSERT(err == 0);
}
}
if (mask & ATTR_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = ZTOI(zp)->i_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
zp->z_atime_dirty = B_FALSE;
ZFS_TIME_ENCODE(&ip->i_atime, atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, sizeof (atime));
}
if (mask & (ATTR_MTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
ZTOI(zp)->i_mtime = zpl_inode_timestamp_truncate(
vap->va_mtime, ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (mask & (ATTR_CTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
ZTOI(zp)->i_ctime = zpl_inode_timestamp_truncate(vap->va_ctime,
ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
if (attrzp && mask) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
sizeof (ctime));
}
/*
* Do this after setting the timestamps to prevent the timestamp
* update from toggling the bit.
*/
if (xoap && (mask & ATTR_XVATTR)) {
/*
* Restore the trimmed-off masks
* so that return masks can be set for the caller.
*/
if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT(S_ISREG(ip->i_mode));
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
mutex_exit(&zp->z_lock);
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&attrzp->z_acl_lock);
mutex_exit(&attrzp->z_lock);
}
out:
if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT(err2 == 0);
}
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
if (attrzp)
zrele(attrzp);
if (err == ERESTART)
goto top;
} else {
if (count > 0)
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
if (attrzp) {
if (err2 == 0 && handle_eadir)
err2 = zfs_setattr_dir(attrzp);
zrele(attrzp);
}
zfs_znode_update_vfs(zp);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
out3:
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(tmpxvattr, sizeof (xvattr_t));
ZFS_EXIT(zfsvfs);
return (err);
}
typedef struct zfs_zlock {
krwlock_t *zl_rwlock; /* lock we acquired */
znode_t *zl_znode; /* znode we held */
struct zfs_zlock *zl_next; /* next in list */
} zfs_zlock_t;
/*
* Drop locks and release vnodes that were held by zfs_rename_lock().
*/
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
while ((zl = *zlpp) != NULL) {
if (zl->zl_znode != NULL)
zfs_zrele_async(zl->zl_znode);
rw_exit(zl->zl_rwlock);
*zlpp = zl->zl_next;
kmem_free(zl, sizeof (*zl));
}
}
/*
* Search back through the directory tree, using the ".." entries.
* Lock each directory in the chain to prevent concurrent renames.
* Fail any attempt to move a directory into one of its own descendants.
* XXX - z_parent_lock can overlap with map or grow locks
*/
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
znode_t *zp = tdzp;
uint64_t rootid = ZTOZSB(zp)->z_root;
uint64_t oidp = zp->z_id;
krwlock_t *rwlp = &szp->z_parent_lock;
krw_t rw = RW_WRITER;
/*
* First pass write-locks szp and compares to zp->z_id.
* Later passes read-lock zp and compare to zp->z_parent.
*/
do {
if (!rw_tryenter(rwlp, rw)) {
/*
* Another thread is renaming in this path.
* Note that if we are a WRITER, we don't have any
* parent_locks held yet.
*/
if (rw == RW_READER && zp->z_id > szp->z_id) {
/*
* Drop our locks and restart
*/
zfs_rename_unlock(&zl);
*zlpp = NULL;
zp = tdzp;
oidp = zp->z_id;
rwlp = &szp->z_parent_lock;
rw = RW_WRITER;
continue;
} else {
/*
* Wait for other thread to drop its locks
*/
rw_enter(rwlp, rw);
}
}
zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
zl->zl_rwlock = rwlp;
zl->zl_znode = NULL;
zl->zl_next = *zlpp;
*zlpp = zl;
if (oidp == szp->z_id) /* We're a descendant of szp */
return (SET_ERROR(EINVAL));
if (oidp == rootid) /* We've hit the top */
return (0);
if (rw == RW_READER) { /* i.e. not the first pass */
int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
if (error)
return (error);
zl->zl_znode = zp;
}
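/* Step up to the parent of the current directory for the next pass. */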
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
&oidp, sizeof (oidp));
rwlp = &zp->z_parent_lock;
rw = RW_READER;
} while (zp->z_id != sdzp->z_id);
return (0);
}
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdzp - Source directory containing the "old entry".
* snm - Old entry name.
* tdzp - Target directory to contain the "new entry".
* tnm - New entry name.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdzp,tdzp - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
cred_t *cr, int flags)
{
znode_t *szp, *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(sdzp);
zilog_t *zilog;
zfs_dirlock_t *sdl, *tdl;
dmu_tx_t *tx;
zfs_zlock_t *zl;
int cmp, serr, terr;
int error = 0;
int zflg = 0;
boolean_t waited = B_FALSE;
if (snm == NULL || tnm == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(sdzp);
zilog = zfsvfs->z_log;
ZFS_VERIFY_ZP(tdzp);
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (ZTOI(tdzp)->i_sb != ZTOI(sdzp)->i_sb ||
zfsctl_is_node(ZTOI(tdzp))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
szp = NULL;
tzp = NULL;
zl = NULL;
/*
* This is to prevent the creation of links into attribute space
* by renaming a linked file into/out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Lock source and target directory entries. To prevent deadlock,
* a lock ordering must be defined. We lock the directory with
* the smallest object id first, or if it's a tie, the one with
* the lexically first name.
*/
if (sdzp->z_id < tdzp->z_id) {
cmp = -1;
} else if (sdzp->z_id > tdzp->z_id) {
cmp = 1;
} else {
/*
* First compare the two name arguments without
* considering any case folding.
*/
int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
ASSERT(error == 0 || !zfsvfs->z_utf8);
if (cmp == 0) {
/*
* POSIX: "If the old argument and the new argument
* both refer to links to the same existing file,
* the rename() function shall return successfully
* and perform no other action."
*/
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* If the file system is case-folding, then we may
* have some more checking to do. A case-folding file
* system is either supporting mixed case sensitivity
* access or is completely case-insensitive. Note
* that the file system is always case preserving.
*
* In mixed sensitivity mode case sensitive behavior
* is the default. FIGNORECASE must be used to
* explicitly request case insensitive behavior.
*
* If the source and target names provided differ only
* by case (e.g., a request to rename 'tim' to 'Tim'),
* we will treat this as a special case in the
* case-insensitive mode: as long as the source name
* is an exact match, we will allow this to proceed as
* a name-change request.
*/
if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
(zfsvfs->z_case == ZFS_CASE_MIXED &&
flags & FIGNORECASE)) &&
u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
&error) == 0) {
/*
* case preserving rename request, require exact
* name matches
*/
zflg |= ZCIEXACT;
zflg &= ~ZCILOOK;
}
}
/*
* If the source and destination directories are the same, we should
* grab the z_name_lock of that directory only once.
*/
if (sdzp == tdzp) {
zflg |= ZHAVELOCK;
rw_enter(&sdzp->z_name_lock, RW_READER);
}
if (cmp < 0) {
serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
ZEXISTS | zflg, NULL, NULL);
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
} else {
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, zflg, NULL, NULL);
serr = zfs_dirent_lock(&sdl,
sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
NULL, NULL);
}
if (serr) {
/*
* Source entry invalid or not there.
*/
if (!terr) {
zfs_dirent_unlock(tdl);
if (tzp)
zrele(tzp);
}
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(snm, "..") == 0)
serr = EINVAL;
ZFS_EXIT(zfsvfs);
return (serr);
}
if (terr) {
zfs_dirent_unlock(sdl);
zrele(szp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(tnm, "..") == 0)
terr = EINVAL;
ZFS_EXIT(zfsvfs);
return (terr);
}
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID but also the ZFS_PROJINHERIT flag. In
* that case, we only allow renames into our tree when the project
* IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto out;
if (S_ISDIR(ZTOI(szp)->i_mode)) {
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if (S_ISDIR(ZTOI(szp)->i_mode)) {
if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
} else {
if (S_ISDIR(ZTOI(tzp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
}
/*
* POSIX dictates that when the source and target
* entries refer to the same file object, rename
* must do nothing and exit without error.
*/
if (szp->z_id == tzp->z_id) {
error = 0;
goto out;
}
}
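/*
* Hold everything this rename may modify: the source and target
* directory ZAPs, the SAs of both directories and the source znode,
* the target znode if it exists, and the unlinked set in case the
* target's last link is removed.
*/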
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
zrele(szp);
if (tzp)
zrele(tzp);
goto top;
}
dmu_tx_abort(tx);
zrele(szp);
if (tzp)
zrele(tzp);
ZFS_EXIT(zfsvfs);
return (error);
}
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
if (error == 0) {
error = zfs_link_create(tdl, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
if (tdzp->z_pflags & ZFS_PROJINHERIT)
szp->z_pflags |= ZFS_PROJINHERIT;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME |
(flags & FIGNORECASE ? TX_CI : 0), sdzp,
sdl->dl_name, tdzp, tdl->dl_name, szp);
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY3U(zfs_link_destroy(tdl, szp, tx,
ZRENAMING, NULL), ==, 0);
}
} else {
/*
* If we had removed the existing target, the subsequent
* call to zfs_link_create() to add back the same entry,
* but with the new dnode (szp), should not fail.
*/
ASSERT(tzp == NULL);
}
}
dmu_tx_commit(tx);
out:
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
zfs_znode_update_vfs(sdzp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (sdzp != tdzp)
zfs_znode_update_vfs(tdzp);
zfs_znode_update_vfs(szp);
zrele(szp);
if (tzp) {
zfs_znode_update_vfs(tzp);
zrele(tzp);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dzp - Directory to contain new symbolic link.
* name - Name of directory entry in dip.
* vap - Attributes of new entry.
* link - Name for new symlink entry.
* cr - credentials of caller.
* flags - case flags
*
* OUT: zpp - Znode for new symbolic link.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link,
znode_t **zpp, cred_t *cr, int flags)
{
znode_t *zp;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
int zflg = ZNEW;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
boolean_t waited = B_FALSE;
ASSERT(S_ISLNK(vap->va_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
top:
*zpp = NULL;
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
if (error) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create a new object for the symlink.
* For version 4 ZPL datasets the symlink will be an SA attribute.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
link, len, tx);
else
zfs_sa_symlink(zp, link, len, tx);
mutex_exit(&zp->z_lock);
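/* The size of a symlink is the length of its target path. */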
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
error = zfs_link_create(dl, zp, tx, ZNEW);
if (error != 0) {
zfs_znode_delete(zp, tx);
remove_inode_hash(ZTOI(zp));
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
zfs_znode_update_vfs(dzp);
zfs_znode_update_vfs(zp);
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (error == 0) {
*zpp = zp;
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
} else {
zrele(zp);
}
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by ip.
*
* IN: ip - inode of symbolic link
* uio - structure to contain the link path.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*/
/* ARGSUSED */
int
zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
mutex_exit(&zp->z_lock);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Insert a new entry into directory tdzp referencing szp.
*
* IN: tdzp - Directory to contain new entry.
* szp - znode of new entry.
* name - name of new entry.
* cr - credentials of caller.
* flags - case flags.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* tdzp - ctime|mtime updated
* szp - ctime updated
*/
/* ARGSUSED */
int
zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
int flags)
{
struct inode *sip = ZTOI(szp);
znode_t *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(tdzp);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uint64_t parent;
uid_t owner;
boolean_t waited = B_FALSE;
boolean_t is_tmpfile = 0;
uint64_t txg;
#ifdef HAVE_TMPFILE
is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
#endif
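/*
* A tmpfile inode has a zero link count; when the kernel has marked
* it I_LINKABLE it may be linked back into the namespace, which
* requires removing it from the unlinked set once the new directory
* entry has been created (see below).
*/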
ASSERT(S_ISDIR(ZTOI(tdzp)->i_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(tdzp);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (S_ISDIR(sip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
ZFS_VERIFY_ZP(szp);
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID but also the ZFS_PROJINHERIT flag. In
* that case, we only allow hard link creation in our tree when the
* project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (sip->i_sb != ZTOI(tdzp)->i_sb || zfsctl_is_node(sip)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
top:
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, tdzp, name, &tzp, zf, NULL, NULL);
if (error) {
ZFS_EXIT(zfsvfs);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
if (is_tmpfile)
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/* unmark z_unlinked so zfs_link_create will not reject */
if (is_tmpfile)
szp->z_unlinked = B_FALSE;
error = zfs_link_create(dl, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
/*
* A tmpfile is created in z_unlinkedobj, so remove it from there.
* Also, we don't log to the ZIL, because all previous file
* operations on the tmpfile were ignored by the ZIL. Instead we
* always wait for the txg to sync to make sure all previous
* operations are sync-safe.
*/
if (is_tmpfile) {
VERIFY(zap_remove_int(zfsvfs->z_os,
zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
} else if (is_tmpfile) {
/* restore z_unlinked since linking failed */
szp->z_unlinked = B_TRUE;
}
txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED)
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
zfs_znode_update_vfs(tdzp);
zfs_znode_update_vfs(szp);
ZFS_EXIT(zfsvfs);
return (error);
}
static void
zfs_putpage_commit_cb(void *arg)
{
struct page *pp = arg;
ClearPageError(pp);
end_page_writeback(pp);
}
/*
* Push a page out to disk, once the page is on stable storage the
* registered commit callback will be run as notification of completion.
*
* IN: ip - page mapped for inode.
* pp - page to push (page is locked)
* wbc - writeback control data
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated
*/
/* ARGSUSED */
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
loff_t offset;
loff_t pgoff;
unsigned int pglen;
dmu_tx_t *tx;
caddr_t va;
int err = 0;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int cnt = 0;
struct address_space *mapping;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
ASSERT(PageLocked(pp));
pgoff = page_offset(pp); /* Page byte-offset in file */
offset = i_size_read(ip); /* File length in bytes */
pglen = MIN(PAGE_SIZE, /* Page length in bytes */
P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
/* Page is beyond end of file */
if (pgoff >= offset) {
unlock_page(pp);
ZFS_EXIT(zfsvfs);
return (0);
}
/* Truncate page length to end of file */
if (pgoff + pglen > offset)
pglen = offset - pgoff;
#if 0
/*
* FIXME: Allow mmap writes past its quota. The correct fix
* is to register a page_mkwrite() handler to count the page
* against its quota when it is about to be dirtied.
*/
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
KUID_TO_SUID(ip->i_uid)) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
KGID_TO_SGID(ip->i_gid)) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
err = EDQUOT;
}
#endif
/*
* The ordering here is critical and must adhere to the following
* rules in order to avoid deadlocking in either zfs_read() or
* zfs_free_range() due to a lock inversion.
*
* 1) The page must be unlocked prior to acquiring the range lock.
* This is critical because zfs_read() calls find_lock_page()
* which may block on the page lock while holding the range lock.
*
* 2) Before setting or clearing write back on a page the range lock
* must be held in order to prevent a lock inversion with the
* zfs_free_range() function.
*
* This presents a problem because upon entering this function the
* page lock is already held. To safely acquire the range lock the
* page lock must be dropped. This creates a window where another
* process could truncate, invalidate, dirty, or write out the page.
*
* Therefore, after successfully reacquiring the range and page locks
* the current page state is checked. In the common case everything
* will be as is expected and it can be written out. However, if
* the page state has changed it must be handled accordingly.
*/
mapping = pp->mapping;
redirty_page_for_writepage(wbc, pp);
unlock_page(pp);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
pgoff, pglen, RL_WRITER);
lock_page(pp);
/* Page mapping changed or it was no longer dirty, we're done */
if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
unlock_page(pp);
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (0);
}
/* Another process started writeback; block if required */
if (PageWriteback(pp)) {
unlock_page(pp);
zfs_rangelock_exit(lr);
if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(pp))
#ifdef HAVE_PAGEMAP_FOLIO_WAIT_BIT
folio_wait_bit(page_folio(pp), PG_writeback);
#else
wait_on_page_bit(pp, PG_writeback);
#endif
}
ZFS_EXIT(zfsvfs);
return (0);
}
/* Clear the dirty flag now that the required locks are held */
if (!clear_page_dirty_for_io(pp)) {
unlock_page(pp);
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Counterpart for redirty_page_for_writepage() above. This page
* was in fact not skipped and should not be counted as if it were.
*/
wbc->pages_skipped--;
set_page_writeback(pp);
unlock_page(pp);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_NOWAIT);
if (err != 0) {
if (err == ERESTART)
dmu_tx_wait(tx);
dmu_tx_abort(tx);
__set_page_dirty_nobuffers(pp);
ClearPageError(pp);
end_page_writeback(pp);
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (err);
}
va = kmap(pp);
ASSERT3U(pglen, <=, PAGE_SIZE);
dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
kunmap(pp);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/* Preserve the mtime and ctime provided by the inode */
ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
zp->z_atime_dirty = B_FALSE;
zp->z_seq++;
err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
zfs_putpage_commit_cb, pp);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
if (wbc->sync_mode != WB_SYNC_NONE) {
/*
* Note that this is rarely called under writepages(), because
* writepages() normally handles the entire commit for
* performance reasons.
*/
zil_commit(zfsvfs->z_log, zp->z_id);
}
+ dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, pglen);
+
ZFS_EXIT(zfsvfs);
return (err);
}
/*
* Update the system attributes when the inode has been dirtied. For the
* moment we only update the mode, atime, mtime, and ctime.
*/
int
zfs_dirty_inode(struct inode *ip, int flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
dmu_tx_t *tx;
uint64_t mode, atime[2], mtime[2], ctime[2];
sa_bulk_attr_t bulk[4];
int error = 0;
int cnt = 0;
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return (0);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
#ifdef I_DIRTY_TIME
/*
* This is the lazytime semantic introduced in Linux 4.0.
* This flag is only passed from update_time() when lazytime is set.
* (Note: I_DIRTY_SYNC will also be set if lazytime is not enabled.)
* Fortunately mtime and ctime are managed within ZFS itself, so we
* only need to dirty atime.
*/
if (flags == I_DIRTY_TIME) {
zp->z_atime_dirty = B_TRUE;
goto out;
}
#endif
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
mutex_enter(&zp->z_lock);
zp->z_atime_dirty = B_FALSE;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
/* Preserve the mode, atime, mtime and ctime provided by the inode */
ZFS_TIME_ENCODE(&ip->i_atime, atime);
ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
mode = ip->i_mode;
zp->z_mode = mode;
error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
out:
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
void
zfs_inactive(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t atime[2];
int error;
int need_unlock = 0;
/* Only read lock if we haven't already write locked, e.g. rollback */
if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
need_unlock = 1;
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
}
if (zp->z_sa_hdl == NULL) {
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
return;
}
if (zp->z_atime_dirty && zp->z_unlinked == B_FALSE) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
ZFS_TIME_ENCODE(&ip->i_atime, atime);
mutex_enter(&zp->z_lock);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&atime, sizeof (atime), tx);
zp->z_atime_dirty = B_FALSE;
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
}
}
zfs_zinactive(zp);
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
* Fill pages with data from the disk.
*/
static int
zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
struct page *cur_pp;
u_offset_t io_off, total;
size_t io_len;
loff_t i_size;
unsigned page_idx;
int err;
os = zfsvfs->z_os;
io_len = nr_pages << PAGE_SHIFT;
i_size = i_size_read(ip);
io_off = page_offset(pl[0]);
if (io_off + io_len > i_size)
io_len = i_size - io_off;
/*
* Iterate over list of pages and read each page individually.
*/
page_idx = 0;
for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
caddr_t va;
cur_pp = pl[page_idx++];
va = kmap(cur_pp);
err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
DMU_READ_PREFETCH);
kunmap(cur_pp);
if (err) {
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
}
return (0);
}
/*
* Uses zfs_fillpage to read data from the file and fill the pages.
*
* IN: ip - inode of file to get data from.
* pl - list of pages to read
* nr_pages - number of pages to read
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
/* ARGSUSED */
int
zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int err;
if (pl == NULL)
return (0);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
err = zfs_fillpage(ip, pl, nr_pages);
+ dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nr_pages*PAGESIZE);
+
ZFS_EXIT(zfsvfs);
return (err);
}
/*
* Check ZFS specific permissions to memory map a section of a file.
*
* IN: ip - inode of the file to mmap
* off - file offset
* addrp - start address in memory region
* len - length of memory region
* vm_flags- address flags
*
* RETURN: 0 if success
* error code if failure
*/
/*ARGSUSED*/
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
unsigned long vm_flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((vm_flags & VM_WRITE) && (zp->z_pflags &
(ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if ((vm_flags & (VM_READ | VM_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
if (off < 0 || len > MAXOFFSET_T - off) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENXIO));
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: zp - znode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* zp - ctime|mtime updated
*/
/* ARGSUSED */
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (cmd != F_FREESP) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
ZFS_ENTER(zfsvfs);
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOSPC));
}
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
gen = (uint32_t)gen64;
size = SHORT_FID_LEN;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
ZFS_EXIT(zfsvfs);
return (0);
}
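/*
* Illustrative sketch (hypothetical helper, shown for clarity only):
* the byte-packing loops above amount to storing each value
* least-significant byte first, independent of host endianness, so the
* resulting FID is portable across machines:
*/
#if 0
static void
zfid_pack_le(uint8_t *dst, uint64_t val, size_t nbytes)
{
size_t i;
for (i = 0; i < nbytes; i++)
dst[i] = (uint8_t)(val >> (8 * i));
}
#endif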
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);
/* BEGIN CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
/* END CSTYLED */
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
index 52e62f4d1da4..a13fc2aa2546 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zio_crypt.c
@@ -1,2043 +1,2059 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
*/
#include <sys/zio_crypt.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
#include <sys/hkdf.h>
#include <sys/qat.h>
/*
* This file is responsible for handling all of the details of generating
* encryption parameters and performing encryption and authentication.
*
* BLOCK ENCRYPTION PARAMETERS:
* Encryption /Authentication Algorithm Suite (crypt):
* The encryption algorithm, mode, and key length we are going to use. We
* currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
* keys. All authentication is currently done with SHA512-HMAC.
*
* Plaintext:
* The unencrypted data that we want to encrypt.
*
* Initialization Vector (IV):
* An initialization vector for the encryption algorithms. This is used to
* "tweak" the encryption algorithms so that two blocks of the same data are
* encrypted into different ciphertext outputs, thus obfuscating block patterns.
* The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
* never reused with the same encryption key. This value is stored unencrypted
* and must simply be provided to the decryption function. We use a 96 bit IV
* (as recommended by NIST) for all block encryption. For non-dedup blocks we
* derive the IV randomly. The first 64 bits of the IV are stored in the second
* word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
* blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
* of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
* of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
* level 0 blocks is the number of allocated dnodes in that block. The on-disk
* format supports at most 2^15 slots per L0 dnode block, because the maximum
* block size is 16MB (2^24). In either case, for level 0 blocks this number
* will still be smaller than UINT32_MAX so it is safe to store the IV in the
* top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
* for the dnode code.
*
* Master key:
* This is the most important secret data of an encrypted dataset. It is used
* along with the salt to generate the actual encryption keys via HKDF. We
* do not use the master key to directly encrypt any data because there are
* theoretical limits on how much data can actually be safely encrypted with
* any encryption mode. The master key is stored encrypted on disk with the
* user's wrapping key. Its length is determined by the encryption algorithm.
* For details on how this is stored see the block comment in dsl_crypt.c
*
* Salt:
* Used as an input to the HKDF function, along with the master key. We use a
* 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
* can be used for encrypting many blocks, so we cache the current salt and the
* associated derived key in zio_crypt_t so we do not need to derive it again
* needlessly.
*
* Encryption Key:
* A secret binary key, generated from an HKDF function used to encrypt and
* decrypt data.
*
* Message Authentication Code (MAC)
* The MAC is an output of authenticated encryption modes such as AES-GCM and
* AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
* data on disk and return garbage to the application. Effectively, it is a
* checksum that can not be reproduced by an attacker. We store the MAC in the
* second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
* regular checksum of the ciphertext which can be used for scrubbing.
*
* OBJECT AUTHENTICATION:
* Some object types, such as DMU_OT_MASTER_NODE cannot be encrypted because
* they contain some info that always needs to be readable. To prevent this
* data from being altered, we authenticate this data using SHA512-HMAC. This
* will produce a MAC (similar to the one produced via encryption) which can
* be used to verify the object was not modified. HMACs do not require key
* rotation or IVs, so we can keep up to the full 3 copies of authenticated
* data.
*
* ZIL ENCRYPTION:
* ZIL blocks have their bp written to disk ahead of the associated data, so we
* cannot store the MAC there as we normally do. For these blocks the MAC is
* stored in the embedded checksum within the zil_chain_t header. The salt and
* IV are generated for the block on bp allocation instead of at encryption
* time. In addition, ZIL blocks have some pieces that must be left in plaintext
* for claiming even though all of the sensitive user data still needs to be
* encrypted. The function zio_crypt_init_uios_zil() handles parsing which
* pieces of the block need to be encrypted. All data that is not encrypted is
* authenticated using the AAD mechanisms that the supported encryption modes
* provide for. In order to preserve the semantics of the ZIL for encrypted
* datasets, the ZIL is not protected at the objset level as described below.
*
* DNODE ENCRYPTION:
* Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
* in plaintext for scrubbing and claiming, but the bonus buffers might contain
* sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
* which pieces of the block need to be encrypted. For more details about
* dnode authentication and encryption, see zio_crypt_init_uios_dnode().
*
* OBJECT SET AUTHENTICATION:
* Up to this point, everything we have encrypted and authenticated has been
* at level 0 (or -2 for the ZIL). If we did not do any further work the
* on-disk format would be susceptible to attacks that deleted or rearranged
* the order of level 0 blocks. Ideally, the cleanest solution would be to
* maintain a tree of authentication MACs going up the bp tree. However, this
* presents a problem for raw sends. Send files do not send information about
* indirect blocks so there would be no convenient way to transfer the MACs and
* they cannot be recalculated on the receive side without the master key which
* would defeat one of the purposes of raw sends in the first place. Instead,
* for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
* from the level below. We also include some portable fields from blk_prop such
* as the lsize and compression algorithm to prevent the data from being
* misinterpreted.
*
* At the objset level, we maintain 2 separate 256 bit MACs in the
* objset_phys_t. The first one is "portable" and is the logical root of the
* MAC tree maintained in the metadnode's bps. The second is "local" and is
* used as the root MAC for the user accounting objects, which are also not
* transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
* of the send file. The useraccounting code ensures that the useraccounting
* info is not present upon a receive, so the local MAC can simply be cleared
* out at that time. For more info about objset_phys_t authentication, see
* zio_crypt_do_objset_hmacs().
*
* CONSIDERATIONS FOR DEDUP:
* In order for dedup to work, blocks that we want to dedup with one another
* need to use the same IV and encryption key, so that they will have the same
* ciphertext. Normally, one should never reuse an IV with the same encryption
* key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
* blocks. In this case, however, since we are using the same plaintext as
* well, all that we end up with is a duplicate of the original ciphertext we
* already had. As a result, an attacker with read access to the raw disk will
* be able to tell which blocks are the same but this information is given away
* by dedup anyway. In order to get the same IVs and encryption keys for
* equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
* here so that a reproducible checksum of the plaintext is never available to
* the attacker. The HMAC key is kept alongside the master key, encrypted on
* disk. The first 64 bits of the HMAC are used in place of the random salt, and
* the next 96 bits are used as the IV. As a result of this mechanism, dedup
* will only work within a clone family since encrypted dedup requires use of
* the same master and HMAC keys.
*/
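/*
* Illustrative summary of where the parameters described above are
* stored on disk (for reference only; zio_crypt_encode_params_bp(),
* zio_crypt_encode_mac_bp() and zio_crypt_generate_iv_salt_dedup()
* below are the authoritative code):
*
* salt (64 bits) -> blk_dva[2].dva_word[0]
* IV, low 64 bits -> blk_dva[2].dva_word[1]
* IV, high 32 bits -> upper 32 bits of blk_fill (IV2)
* MAC (128 bits) -> blk_cksum.zc_word[2] and zc_word[3]
* dedup blocks -> salt = HMAC(plaintext)[0..7],
* IV = HMAC(plaintext)[8..19]
*/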
/*
* After encrypting many blocks with the same key we may start to run up
* against the theoretical limits of how much data can securely be encrypted
* with a single key using the supported encryption modes. The most obvious
* limitation is that our risk of generating 2 equivalent 96 bit IVs increases
* the more IVs we generate (which both GCM and CCM modes strictly forbid).
* This risk actually grows surprisingly quickly over time according to the
* Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
* generated n IVs with a cryptographically secure RNG, the approximate
* probability p(n) of a collision is given as:
*
* p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
*
* [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
*
* Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion
* we must not write more than 398,065,730 blocks with the same encryption key.
* Therefore, we rotate our keys after 400,000,000 blocks have been written by
* generating a new random 64 bit salt for our HKDF encryption key generation
* function.
*/
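/*
* Worked example (illustrative, added for clarity): plugging
* n = 398,065,730 into the approximation above gives
*
* n * (n - 1) / (2 * (2^96)) ~= 1.58e17 / 1.58e29 ~= 1.0e-12
* p(n) ~= 1 - e^(-1.0e-12) ~= 1.0e-12
*
* i.e. roughly a one-in-a-trillion chance of an IV collision, which the
* default below approximates by rotating the salt after 400,000,000
* uses.
*/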
#define ZFS_KEY_MAX_SALT_USES_DEFAULT 400000000
#define ZFS_CURRENT_MAX_SALT_USES \
(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
typedef struct blkptr_auth_buf {
uint64_t bab_prop; /* blk_prop - portable mask */
uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */
uint64_t bab_pad; /* reserved for future use */
} blkptr_auth_buf_t;
zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
{"", ZC_TYPE_NONE, 0, "inherit"},
{"", ZC_TYPE_NONE, 0, "on"},
{"", ZC_TYPE_NONE, 0, "off"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"},
{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"},
{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
rw_destroy(&key->zk_salt_lock);
/* free crypto templates */
crypto_destroy_ctx_template(key->zk_current_tmpl);
crypto_destroy_ctx_template(key->zk_hmac_tmpl);
/* zero out sensitive data */
bzero(key, sizeof (zio_crypt_key_t));
}
int
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
{
int ret;
crypto_mechanism_t mech;
uint_t keydata_len;
ASSERT(key != NULL);
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
keydata_len = zio_crypt_table[crypt].ci_keylen;
bzero(key, sizeof (zio_crypt_key_t));
/* fill keydata buffers and salt with random data */
ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_master_keydata, keydata_len);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
if (ret != 0)
goto error;
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for the ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = &key->zk_hmac_key;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
/*
* Initialize the crypto templates. It's ok if this fails because
* this is just an optimization.
*/
mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname);
ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
&key->zk_current_tmpl, KM_SLEEP);
if (ret != CRYPTO_SUCCESS)
key->zk_current_tmpl = NULL;
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key,
&key->zk_hmac_tmpl, KM_SLEEP);
if (ret != CRYPTO_SUCCESS)
key->zk_hmac_tmpl = NULL;
key->zk_crypt = crypt;
key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
key->zk_salt_count = 0;
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
return (0);
error:
zio_crypt_key_destroy(key);
return (ret);
}
static int
zio_crypt_key_change_salt(zio_crypt_key_t *key)
{
int ret = 0;
uint8_t salt[ZIO_DATA_SALT_LEN];
crypto_mechanism_t mech;
uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;
/* generate a new salt */
ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
rw_enter(&key->zk_salt_lock, RW_WRITER);
/* someone beat us to the salt rotation, just unlock and return */
if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
goto out_unlock;
/* derive the current key from the master key and the new salt */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
if (ret != 0)
goto out_unlock;
/* assign the salt and reset the usage count */
bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
key->zk_salt_count = 0;
/* destroy the old context template and create the new one */
crypto_destroy_ctx_template(key->zk_current_tmpl);
ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
&key->zk_current_tmpl, KM_SLEEP);
if (ret != CRYPTO_SUCCESS)
key->zk_current_tmpl = NULL;
rw_exit(&key->zk_salt_lock);
return (0);
out_unlock:
rw_exit(&key->zk_salt_lock);
error:
return (ret);
}
/* See comment above zfs_key_max_salt_uses definition for details */
int
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
{
int ret;
boolean_t salt_change;
rw_enter(&key->zk_salt_lock, RW_READER);
bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
ZFS_CURRENT_MAX_SALT_USES);
rw_exit(&key->zk_salt_lock);
if (salt_change) {
ret = zio_crypt_key_change_salt(key);
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* This function handles all encryption and decryption in zfs. When
* encrypting it expects puio to reference the plaintext and cuio to
* reference the ciphertext. cuio must have enough space for the
* ciphertext + room for a MAC. datalen should be the length of the
* plaintext / ciphertext alone.
*/
static int
zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key,
crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen,
zfs_uio_t *puio, zfs_uio_t *cuio, uint8_t *authbuf, uint_t auth_len)
{
int ret;
crypto_data_t plaindata, cipherdata;
CK_AES_CCM_PARAMS ccmp;
CK_AES_GCM_PARAMS gcmp;
crypto_mechanism_t mech;
zio_crypt_info_t crypt_info;
uint_t plain_full_len, maclen;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(key->ck_format, ==, CRYPTO_KEY_RAW);
/* lookup the encryption info */
crypt_info = zio_crypt_table[crypt];
/* the mac will always be the last iovec_t in the cipher uio */
maclen = cuio->uio_iov[cuio->uio_iovcnt - 1].iov_len;
ASSERT(maclen <= ZIO_DATA_MAC_LEN);
/* setup encryption mechanism (same as crypt) */
mech.cm_type = crypto_mech2id(crypt_info.ci_mechname);
/*
* Strangely, the ICP requires that plain_full_len must include
* the MAC length when decrypting, even though the UIO does not
* need to have the extra space allocated.
*/
if (encrypt) {
plain_full_len = datalen;
} else {
plain_full_len = datalen + maclen;
}
/*
* setup encryption params (currently only AES CCM and AES GCM
* are supported)
*/
if (crypt_info.ci_crypt_type == ZC_TYPE_CCM) {
ccmp.ulNonceSize = ZIO_DATA_IV_LEN;
ccmp.ulAuthDataSize = auth_len;
ccmp.authData = authbuf;
ccmp.ulMACSize = maclen;
ccmp.nonce = ivbuf;
ccmp.ulDataSize = plain_full_len;
mech.cm_param = (char *)(&ccmp);
mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS);
} else {
gcmp.ulIvLen = ZIO_DATA_IV_LEN;
gcmp.ulIvBits = CRYPTO_BYTES2BITS(ZIO_DATA_IV_LEN);
gcmp.ulAADLen = auth_len;
gcmp.pAAD = authbuf;
gcmp.ulTagBits = CRYPTO_BYTES2BITS(maclen);
gcmp.pIv = ivbuf;
mech.cm_param = (char *)(&gcmp);
mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
}
/* populate the cipher and plain data structs. */
plaindata.cd_format = CRYPTO_DATA_UIO;
plaindata.cd_offset = 0;
plaindata.cd_uio = puio;
plaindata.cd_miscdata = NULL;
plaindata.cd_length = plain_full_len;
cipherdata.cd_format = CRYPTO_DATA_UIO;
cipherdata.cd_offset = 0;
cipherdata.cd_uio = cuio;
cipherdata.cd_miscdata = NULL;
cipherdata.cd_length = datalen + maclen;
/* perform the actual encryption */
if (encrypt) {
ret = crypto_encrypt(&mech, &plaindata, key, tmpl, &cipherdata,
NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
} else {
ret = crypto_decrypt(&mech, &cipherdata, key, tmpl, &plaindata,
NULL);
if (ret != CRYPTO_SUCCESS) {
ASSERT3U(ret, ==, CRYPTO_INVALID_MAC);
ret = SET_ERROR(ECKSUM);
goto error;
}
}
return (0);
error:
return (ret);
}
int
zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{
int ret;
zfs_uio_t puio, cuio;
uint64_t aad[3];
iovec_t plain_iovecs[2], cipher_iovecs[3];
uint64_t crypt = key->zk_crypt;
uint_t enc_len, keydata_len, aad_len;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* generate iv for wrapping the master and hmac key */
ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
if (ret != 0)
goto error;
/* initialize zfs_uio_ts */
plain_iovecs[0].iov_base = key->zk_master_keydata;
plain_iovecs[0].iov_len = keydata_len;
plain_iovecs[1].iov_base = key->zk_hmac_keydata;
plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[0].iov_base = keydata_out;
cipher_iovecs[0].iov_len = keydata_len;
cipher_iovecs[1].iov_base = hmac_keydata_out;
cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[2].iov_base = mac;
cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN;
/*
* Although we don't support writing to the old format, we do
* support rewrapping the key so that the user can move and
* quarantine datasets on the old format.
*/
if (key->zk_version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(key->zk_guid);
} else {
ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(key->zk_guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(key->zk_version);
}
enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
puio.uio_iov = plain_iovecs;
puio.uio_iovcnt = 2;
puio.uio_segflg = UIO_SYSSPACE;
cuio.uio_iov = cipher_iovecs;
cuio.uio_iovcnt = 3;
cuio.uio_segflg = UIO_SYSSPACE;
/* encrypt the keys and store the resulting ciphertext and mac */
ret = zio_do_crypt_uio(B_TRUE, crypt, cwkey, NULL, iv, enc_len,
&puio, &cuio, (uint8_t *)aad, aad_len);
if (ret != 0)
goto error;
return (0);
error:
return (ret);
}
int
zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
uint8_t *mac, zio_crypt_key_t *key)
{
crypto_mechanism_t mech;
zfs_uio_t puio, cuio;
uint64_t aad[3];
iovec_t plain_iovecs[2], cipher_iovecs[3];
uint_t enc_len, keydata_len, aad_len;
int ret;
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
keydata_len = zio_crypt_table[crypt].ci_keylen;
/* initialize zfs_uio_ts */
plain_iovecs[0].iov_base = key->zk_master_keydata;
plain_iovecs[0].iov_len = keydata_len;
plain_iovecs[1].iov_base = key->zk_hmac_keydata;
plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[0].iov_base = keydata;
cipher_iovecs[0].iov_len = keydata_len;
cipher_iovecs[1].iov_base = hmac_keydata;
cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
cipher_iovecs[2].iov_base = mac;
cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN;
if (version == 0) {
aad_len = sizeof (uint64_t);
aad[0] = LE_64(guid);
} else {
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
aad_len = sizeof (uint64_t) * 3;
aad[0] = LE_64(guid);
aad[1] = LE_64(crypt);
aad[2] = LE_64(version);
}
enc_len = keydata_len + SHA512_HMAC_KEYLEN;
puio.uio_iov = plain_iovecs;
puio.uio_segflg = UIO_SYSSPACE;
puio.uio_iovcnt = 2;
cuio.uio_iov = cipher_iovecs;
cuio.uio_iovcnt = 3;
cuio.uio_segflg = UIO_SYSSPACE;
/* decrypt the keys and store the result in the output buffers */
ret = zio_do_crypt_uio(B_FALSE, crypt, cwkey, NULL, iv, enc_len,
&puio, &cuio, (uint8_t *)aad, aad_len);
if (ret != 0)
goto error;
/* generate a fresh salt */
ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
if (ret != 0)
goto error;
/* derive the current key from the master key */
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
keydata_len);
if (ret != 0)
goto error;
/* initialize keys for ICP */
key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
key->zk_current_key.ck_data = key->zk_current_keydata;
key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
/*
* Initialize the crypto templates. It's ok if this fails because
* this is just an optimization.
*/
mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname);
ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
&key->zk_current_tmpl, KM_SLEEP);
if (ret != CRYPTO_SUCCESS)
key->zk_current_tmpl = NULL;
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key,
&key->zk_hmac_tmpl, KM_SLEEP);
if (ret != CRYPTO_SUCCESS)
key->zk_hmac_tmpl = NULL;
key->zk_crypt = crypt;
key->zk_version = version;
key->zk_guid = guid;
key->zk_salt_count = 0;
return (0);
error:
zio_crypt_key_destroy(key);
return (ret);
}
int
zio_crypt_generate_iv(uint8_t *ivbuf)
{
int ret;
/* randomly generate the IV */
ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
if (ret != 0)
goto error;
return (0);
error:
bzero(ivbuf, ZIO_DATA_IV_LEN);
return (ret);
}
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
uint8_t *digestbuf, uint_t digestlen)
{
int ret;
crypto_mechanism_t mech;
crypto_data_t in_data, digest_data;
uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
/* initialize sha512-hmac mechanism and crypto data */
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
mech.cm_param = NULL;
mech.cm_param_len = 0;
/* initialize the crypto data */
in_data.cd_format = CRYPTO_DATA_RAW;
in_data.cd_offset = 0;
in_data.cd_length = datalen;
in_data.cd_raw.iov_base = (char *)data;
in_data.cd_raw.iov_len = in_data.cd_length;
digest_data.cd_format = CRYPTO_DATA_RAW;
digest_data.cd_offset = 0;
digest_data.cd_length = SHA512_DIGEST_LENGTH;
digest_data.cd_raw.iov_base = (char *)raw_digestbuf;
digest_data.cd_raw.iov_len = digest_data.cd_length;
/* generate the hmac */
ret = crypto_mac(&mech, &in_data, &key->zk_hmac_key, key->zk_hmac_tmpl,
&digest_data, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
bcopy(raw_digestbuf, digestbuf, digestlen);
return (0);
error:
bzero(digestbuf, digestlen);
return (ret);
}
int
zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
int ret;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
ret = zio_crypt_do_hmac(key, data, datalen,
digestbuf, SHA512_DIGEST_LENGTH);
if (ret != 0)
return (ret);
bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
return (0);
}
/*
* The following functions are used to encode and decode encryption parameters
* into blkptr_t and zil_header_t. The ICP wants to use these parameters as
* byte strings, which normally means that these strings would not need to deal
* with byteswapping at all. However, both blkptr_t and zil_header_t may be
* byteswapped by lower layers and so we must "undo" that byteswap here upon
* decoding and encoding in a non-native byteorder. These functions require
* that the byteorder bit is correct before being called.
*/
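/*
* Concrete example of the above: a pool written on a little-endian host
* and imported on a big-endian host has BP_SHOULD_BYTESWAP() set, and
* the lower layers will have byteswapped blk_dva and blk_cksum as 64-bit
* words. The BSWAP_64() calls below undo that so the salt, IV, and MAC
* are handled as the same raw byte strings the ICP originally produced.
*/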
void
zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_ENCRYPTED(bp));
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
BP_SET_IV2(bp, val32);
} else {
bcopy(salt, &val64, sizeof (uint64_t));
bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
bcopy(iv, &val64, sizeof (uint64_t));
bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
BP_SET_IV2(bp, BSWAP_32(val32));
}
}
void
zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
uint64_t val64;
uint32_t val32;
ASSERT(BP_IS_PROTECTED(bp));
/* for convenience, so callers don't need to check */
if (BP_IS_AUTHENTICATED(bp)) {
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
val32 = (uint32_t)BP_GET_IV2(bp);
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
} else {
val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
bcopy(&val64, salt, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
bcopy(&val64, iv, sizeof (uint64_t));
val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
}
}
void
zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
sizeof (uint64_t));
} else {
bcopy(mac, &val64, sizeof (uint64_t));
bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
}
}
void
zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
{
uint64_t val64;
ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));
/* for convenience, so callers don't need to check */
if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
bzero(mac, ZIO_DATA_MAC_LEN);
return;
}
if (!BP_SHOULD_BYTESWAP(bp)) {
bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
} else {
val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
bcopy(&val64, mac, sizeof (uint64_t));
val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
}
}
void
zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
zil_chain_t *zilc = data;
bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
sizeof (uint64_t));
}
void
zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
{
/*
* The ZIL MAC is embedded in the block it protects, which will
* not have been byteswapped by the time this function has been called.
* As a result, we don't need to worry about byteswapping the MAC.
*/
const zil_chain_t *zilc = data;
bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
sizeof (uint64_t));
}
/*
* This routine takes a block of dnodes (src_abd) and copies only the bonus
* buffers to the same offsets in the dst buffer. datalen should be the size
* of both the src_abd and the dst buffer (not just the length of the bonus
* buffers).
*/
void
zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
{
uint_t i, max_dnp = datalen >> DNODE_SHIFT;
uint8_t *src;
dnode_phys_t *dnp, *sdnp, *ddnp;
src = abd_borrow_buf_copy(src_abd, datalen);
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
DN_MAX_BONUS_LEN(dnp));
}
}
abd_return_buf(src_abd, src, datalen);
}
/*
* This function decides which fields from blk_prop are included in
* the various on-disk MAC calculations.
*/
static void
zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
{
/*
* Version 0 did not properly zero out all non-portable fields
* as it should have done. We maintain this code so that we can
* do read-only imports of pools on this version.
*/
if (version == 0) {
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE);
return;
}
ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
/*
* The hole_birth feature might set these fields even if this bp
* is a hole. We zero them out here to guarantee that raw sends
* will function with or without the feature.
*/
if (BP_IS_HOLE(bp)) {
bp->blk_prop = 0ULL;
return;
}
/*
* At L0 we want to verify these fields to ensure that data blocks
* can not be reinterpreted. For instance, we do not want an attacker
* to trick us into returning raw lz4 compressed data to the user
* by modifying the compression bits. At higher levels, we cannot
* enforce this policy since raw sends do not convey any information
* about indirect blocks, so these values might be different on the
* receive side. Fortunately, this does not open any new attack
* vectors, since any alterations that can be made to a higher level
* bp must still verify the correct order of the layer below it.
*/
if (BP_GET_LEVEL(bp) != 0) {
BP_SET_BYTEORDER(bp, 0);
BP_SET_COMPRESS(bp, 0);
/*
* psize cannot be set to zero or it will trigger
* asserts, but the value doesn't really matter as
* long as it is constant.
*/
BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE);
}
BP_SET_DEDUP(bp, 0);
BP_SET_CHECKSUM(bp, 0);
}
static void
zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
blkptr_auth_buf_t *bab, uint_t *bab_len)
{
blkptr_t tmpbp = *bp;
if (should_bswap)
byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));
ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
ASSERT0(BP_IS_EMBEDDED(&tmpbp));
zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);
/*
* We always MAC blk_prop in LE to ensure portability. This
* must be done after decoding the mac, since the endianness
* will get zero'd out here.
*/
zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
bab->bab_prop = LE_64(tmpbp.blk_prop);
bab->bab_pad = 0ULL;
/* version 0 did not include the padding */
*bab_len = sizeof (blkptr_auth_buf_t);
if (version == 0)
*bab_len -= sizeof (uint64_t);
}
static int
zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
int ret;
uint_t bab_len;
blkptr_auth_buf_t bab;
crypto_data_t cd;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
cd.cd_length = bab_len;
cd.cd_raw.iov_base = (char *)&bab;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
return (0);
error:
return (ret);
}
static void
zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
SHA2Update(ctx, &bab, bab_len);
}
static void
zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
boolean_t should_bswap, blkptr_t *bp)
{
uint_t bab_len;
blkptr_auth_buf_t bab;
zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
bcopy(&bab, *aadp, bab_len);
*aadp += bab_len;
*aad_len += bab_len;
}
static int
zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
boolean_t should_bswap, dnode_phys_t *dnp)
{
int ret, i;
dnode_phys_t *adnp, tmp_dncore;
size_t dn_core_size = offsetof(dnode_phys_t, dn_blkptr);
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
crypto_data_t cd;
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
/*
* Authenticate the core dnode (masking out non-portable bits).
* We only copy the first 64 bytes we operate on to avoid the overhead
* of copying 512-64 unneeded bytes. The compiler seems to be fine
* with that.
*/
bcopy(dnp, &tmp_dncore, dn_core_size);
adnp = &tmp_dncore;
if (le_bswap) {
adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
adnp->dn_used = BSWAP_64(adnp->dn_used);
}
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
cd.cd_length = dn_core_size;
cd.cd_raw.iov_base = (char *)adnp;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
for (i = 0; i < dnp->dn_nblkptr; i++) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, &dnp->dn_blkptr[i]);
if (ret != 0)
goto error;
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
ret = zio_crypt_bp_do_hmac_updates(ctx, version,
should_bswap, DN_SPILL_BLKPTR(dnp));
if (ret != 0)
goto error;
}
return (0);
error:
return (ret);
}
/*
* objset_phys_t blocks introduce a number of exceptions to the normal
* authentication process. objset_phys_t's contain 2 separate HMACs for
* protecting the integrity of their data. The portable_mac protects the
* metadnode. This MAC can be sent with a raw send and protects against
* reordering of data within the metadnode. The local_mac protects the user
* accounting objects which are not sent from one system to another.
*
* In addition, objset blocks are the only blocks that can be modified and
* written to disk without the key loaded under certain circumstances. During
* zil_claim() we need to be able to update the zil_header_t to complete
* claiming log blocks and during raw receives we need to write out the
* portable_mac from the send file. Both of these actions are possible
* because these fields are not protected by either MAC so neither one will
* need to modify the MACs without the key. However, when the modified blocks
* are written out they will be byteswapped into the host machine's native
* endianness which will modify fields protected by the MAC. As a result, MAC
* calculation for objset blocks works slightly differently from other block
* types. Where other block types MAC the data in whatever endianness is
* written to disk, objset blocks always MAC little endian version of their
* values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
* and le_bswap indicates whether a byteswap is needed to get this block
* into little endian format.
*/
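/*
* Summary sketch of the two HMACs computed below (for reference only):
*
* portable_mac = HMAC-SHA512(os_type, portable os_flags, metadnode)
* local_mac = HMAC-SHA512(non-portable os_flags, userused dnode,
* groupused dnode[, projectused dnode])
*
* The local MAC is instead zeroed out when the accounting dnodes are
* absent or user accounting has not yet been completed.
*/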
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
{
int ret;
crypto_mechanism_t mech;
crypto_context_t ctx;
crypto_data_t cd;
objset_phys_t *osp = data;
uint64_t intval;
boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
/* initialize HMAC mechanism */
mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
mech.cm_param = NULL;
mech.cm_param_len = 0;
cd.cd_format = CRYPTO_DATA_RAW;
cd.cd_offset = 0;
/* calculate the portable MAC from the portable fields and metadnode */
ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in the os_type */
intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
cd.cd_length = sizeof (uint64_t);
cd.cd_raw.iov_base = (char *)&intval;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in the portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
cd.cd_length = sizeof (uint64_t);
cd.cd_raw.iov_base = (char *)&intval;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in fields from the metadnode */
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_meta_dnode);
if (ret)
goto error;
/* store the final digest in a temporary buffer and copy what we need */
cd.cd_length = SHA512_DIGEST_LENGTH;
cd.cd_raw.iov_base = (char *)raw_portable_mac;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_final(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
+ /*
+ * This is necessary here as we check next whether
+ * OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to
+ * decide if the local_mac should be zeroed out. That flag will always
+ * be set by dmu_objset_id_quota_upgrade_cb() and
+ * dmu_objset_userspace_upgrade_cb() if useraccounting has been
+ * completed.
+ */
+ intval = osp->os_flags;
+ if (should_bswap)
+ intval = BSWAP_64(intval);
+ boolean_t uacct_incomplete =
+ !(intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE);
+
/*
* The local MAC protects the user, group and project accounting.
* If these objects are not present, the local MAC is zeroed out.
*/
- if ((datalen >= OBJSET_PHYS_SIZE_V3 &&
+ if (uacct_incomplete ||
+ (datalen >= OBJSET_PHYS_SIZE_V3 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
(datalen >= OBJSET_PHYS_SIZE_V2 &&
osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
(datalen <= OBJSET_PHYS_SIZE_V1)) {
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
}
/* calculate the local MAC from the userused and groupused dnodes */
ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in the non-portable os_flags */
intval = osp->os_flags;
if (should_bswap)
intval = BSWAP_64(intval);
intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
if (!ZFS_HOST_BYTEORDER)
intval = BSWAP_64(intval);
cd.cd_length = sizeof (uint64_t);
cd.cd_raw.iov_base = (char *)&intval;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_update(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
/* add in fields from the user accounting dnodes */
if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_userused_dnode);
if (ret)
goto error;
}
if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_groupused_dnode);
if (ret)
goto error;
}
if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
datalen >= OBJSET_PHYS_SIZE_V3) {
ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
should_bswap, &osp->os_projectused_dnode);
if (ret)
goto error;
}
/* store the final digest in a temporary buffer and copy what we need */
cd.cd_length = SHA512_DIGEST_LENGTH;
cd.cd_raw.iov_base = (char *)raw_local_mac;
cd.cd_raw.iov_len = cd.cd_length;
ret = crypto_mac_final(ctx, &cd, NULL);
if (ret != CRYPTO_SUCCESS) {
ret = SET_ERROR(EIO);
goto error;
}
bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
return (0);
error:
bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(local_mac, ZIO_OBJSET_MAC_LEN);
return (ret);
}
static void
zio_crypt_destroy_uio(zfs_uio_t *uio)
{
if (uio->uio_iov)
kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t));
}
/*
* This function parses an uncompressed indirect block and returns a checksum
* of all the portable fields from all of the contained bps. The portable
* fields are the MAC and all of the fields from blk_prop except for the dedup,
* checksum, and psize bits. For an explanation of the purpose of this, see
* the comment block on object set authentication.
*/
static int
zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
{
blkptr_t *bp;
int i, epb = datalen >> SPA_BLKPTRSHIFT;
SHA2_CTX ctx;
uint8_t digestbuf[SHA512_DIGEST_LENGTH];
/* checksum all of the MACs from the layer below */
SHA2Init(SHA512, &ctx);
for (i = 0, bp = buf; i < epb; i++, bp++) {
zio_crypt_bp_do_indrect_checksum_updates(&ctx, version,
byteswap, bp);
}
SHA2Final(digestbuf, &ctx);
if (generate) {
bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
}
int
zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
/*
* Unfortunately, callers of this function will not always have
* easy access to the on-disk format version. This info is
* normally found in the DSL Crypto Key, but the checksum-of-MACs
* is expected to be verifiable even when the key isn't loaded.
* Here, instead of doing a ZAP lookup for the version for each
* zio, we simply try both existing formats.
*/
ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
if (ret == ECKSUM) {
ASSERT(!generate);
ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
buf, datalen, 0, byteswap, cksum);
}
return (ret);
}
int
zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
int ret;
void *buf;
buf = abd_borrow_buf_copy(abd, datalen);
ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
byteswap, cksum);
abd_return_buf(abd, buf, datalen);
return (ret);
}
/*
* Special case handling routine for encrypting / decrypting ZIL blocks.
* We do not check for the older ZIL chain because the encryption feature
* was not available before the newer ZIL chain was introduced. The goal
* here is to encrypt everything except the blkptr_t of a lr_write_t and
* the zil_chain_t header. Everything that is not encrypted is authenticated.
*/
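/*
* Illustrative breakdown of a ZIL block under this scheme (for
* reference only; the loops below are the authoritative code):
*
* zil_chain_t header -> copied in plaintext, authenticated (AAD),
* except the embedded checksum, which
* will hold the MAC
* lr_t of each record -> copied in plaintext, authenticated (AAD)
* TX_WRITE body (minus bp) -> encrypted
* TX_WRITE embedded bp -> copied in plaintext, authenticated (AAD)
* TX_WRITE trailing data -> encrypted
* all other record bodies -> encrypted
*/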
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
boolean_t *no_crypt)
{
int ret;
uint64_t txtype, lr_len;
uint_t nr_src, nr_dst, crypt_len;
uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
zil_chain_t *zilc;
lr_t *lr;
uint8_t *aadbuf = zio_buf_alloc(datalen);
/* cipherbuf always needs an extra iovec for the MAC */
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
nr_src = 0;
nr_dst = 1;
} else {
src = cipherbuf;
dst = plainbuf;
nr_src = 1;
nr_dst = 0;
}
bzero(dst, datalen);
/* find the start and end record of the log block */
zilc = (zil_chain_t *)src;
slrp = src + sizeof (zil_chain_t);
aadp = aadbuf;
blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
/* calculate the number of encrypted iovecs we will need */
for (; slrp < blkend; slrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
nr_iovecs++;
if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
nr_iovecs++;
}
nr_src += nr_iovecs;
nr_dst += nr_iovecs;
/* allocate the iovec arrays */
if (nr_src != 0) {
src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
if (src_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
if (nr_dst != 0) {
dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
if (dst_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
/*
* Copy the plain zil header over and authenticate everything except
* the checksum that will store our MAC. If we are writing the data
* the embedded checksum will not have been calculated yet, so we don't
* authenticate that.
*/
bcopy(src, dst, sizeof (zil_chain_t));
bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
/* loop over records again, filling in iovecs */
nr_iovecs = 0;
slrp = src + sizeof (zil_chain_t);
dlrp = dst + sizeof (zil_chain_t);
for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
lr = (lr_t *)slrp;
if (!byteswap) {
txtype = lr->lrc_txtype;
lr_len = lr->lrc_reclen;
} else {
txtype = BSWAP_64(lr->lrc_txtype);
lr_len = BSWAP_64(lr->lrc_reclen);
}
/* copy the common lr_t */
bcopy(slrp, dlrp, sizeof (lr_t));
bcopy(slrp, aadp, sizeof (lr_t));
aadp += sizeof (lr_t);
aad_len += sizeof (lr_t);
ASSERT3P(src_iovecs, !=, NULL);
ASSERT3P(dst_iovecs, !=, NULL);
/*
* If this is a TX_WRITE record we want to encrypt everything
* except the bp if it exists. If the bp does exist we want to
* authenticate it.
*/
if (txtype == TX_WRITE) {
crypt_len = sizeof (lr_write_t) -
sizeof (lr_t) - sizeof (blkptr_t);
src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
/* copy the bp now since it will not be encrypted */
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
sizeof (blkptr_t));
bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
aadp, sizeof (blkptr_t));
aadp += sizeof (blkptr_t);
aad_len += sizeof (blkptr_t);
nr_iovecs++;
total_len += crypt_len;
if (lr_len != sizeof (lr_write_t)) {
crypt_len = lr_len - sizeof (lr_write_t);
src_iovecs[nr_iovecs].iov_base =
slrp + sizeof (lr_write_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base =
dlrp + sizeof (lr_write_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
nr_iovecs++;
total_len += crypt_len;
}
} else {
crypt_len = lr_len - sizeof (lr_t);
src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
nr_iovecs++;
total_len += crypt_len;
}
}
*no_crypt = (nr_iovecs == 0);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
if (encrypt) {
puio->uio_iov = src_iovecs;
puio->uio_iovcnt = nr_src;
cuio->uio_iov = dst_iovecs;
cuio->uio_iovcnt = nr_dst;
} else {
puio->uio_iov = dst_iovecs;
puio->uio_iovcnt = nr_dst;
cuio->uio_iov = src_iovecs;
cuio->uio_iovcnt = nr_src;
}
return (0);
error:
zio_buf_free(aadbuf, datalen);
if (src_iovecs != NULL)
kmem_free(src_iovecs, nr_src * sizeof (iovec_t));
if (dst_iovecs != NULL)
kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t));
*enc_len = 0;
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
puio->uio_iov = NULL;
puio->uio_iovcnt = 0;
cuio->uio_iov = NULL;
cuio->uio_iovcnt = 0;
return (ret);
}
/*
* Special case handling routine for encrypting / decrypting dnode blocks.
*/
static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf,
uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
uint_t nr_src, nr_dst, crypt_len;
uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
uint8_t *src, *dst, *aadp;
dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
uint8_t *aadbuf = zio_buf_alloc(datalen);
if (encrypt) {
src = plainbuf;
dst = cipherbuf;
nr_src = 0;
nr_dst = 1;
} else {
src = cipherbuf;
dst = plainbuf;
nr_src = 1;
nr_dst = 0;
}
sdnp = (dnode_phys_t *)src;
ddnp = (dnode_phys_t *)dst;
aadp = aadbuf;
/*
* Count the number of iovecs we will need to do the encryption by
* counting the number of bonus buffers that need to be encrypted.
*/
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
/*
* This block may still be byteswapped. However, all of the
* values we use are either uint8_t's (for which byteswapping
* is a noop) or a * != 0 check, which will work regardless
* of whether or not we byteswap.
*/
if (sdnp[i].dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
sdnp[i].dn_bonuslen != 0) {
nr_iovecs++;
}
}
nr_src += nr_iovecs;
nr_dst += nr_iovecs;
if (nr_src != 0) {
src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
if (src_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
if (nr_dst != 0) {
dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
if (dst_iovecs == NULL) {
ret = SET_ERROR(ENOMEM);
goto error;
}
}
nr_iovecs = 0;
/*
* Iterate through the dnodes again, this time filling in the uios
* we allocated earlier. We also concatenate any data we want to
* authenticate onto aadbuf.
*/
for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
dnp = &sdnp[i];
/* copy over the core fields and blkptrs (kept as plaintext) */
bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
sizeof (blkptr_t));
}
/*
* Handle authenticated data. We authenticate everything in
* the dnode that can be brought over when we do a raw send.
* This includes all of the core fields as well as the MACs
* stored in the bp checksums and all of the portable bits
* from blk_prop. We include the dnode padding here in case it
* ever gets used in the future. Some dn_flags and dn_used are
* not portable, so we mask those values out of the
* authenticated data.
*/
crypt_len = offsetof(dnode_phys_t, dn_blkptr);
bcopy(dnp, aadp, crypt_len);
adnp = (dnode_phys_t *)aadp;
adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
adnp->dn_used = 0;
aadp += crypt_len;
aad_len += crypt_len;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, &dnp->dn_blkptr[j]);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
version, byteswap, DN_SPILL_BLKPTR(dnp));
}
/*
* If this bonus buffer needs to be encrypted, we prepare an
* iovec_t. The encryption / decryption functions will fill
* this in for us with the encrypted or decrypted data.
* Otherwise we add the bonus buffer to the authenticated
* data buffer and copy it over to the destination. The
* encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
* we can guarantee alignment with the AES block size
* (128 bits).
*/
crypt_len = DN_MAX_BONUS_LEN(dnp);
if (dnp->dn_type != DMU_OT_NONE &&
DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
dnp->dn_bonuslen != 0) {
ASSERT3U(nr_iovecs, <, nr_src);
ASSERT3U(nr_iovecs, <, nr_dst);
ASSERT3P(src_iovecs, !=, NULL);
ASSERT3P(dst_iovecs, !=, NULL);
src_iovecs[nr_iovecs].iov_base = DN_BONUS(dnp);
src_iovecs[nr_iovecs].iov_len = crypt_len;
dst_iovecs[nr_iovecs].iov_base = DN_BONUS(&ddnp[i]);
dst_iovecs[nr_iovecs].iov_len = crypt_len;
nr_iovecs++;
total_len += crypt_len;
} else {
bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
bcopy(DN_BONUS(dnp), aadp, crypt_len);
aadp += crypt_len;
aad_len += crypt_len;
}
}
*no_crypt = (nr_iovecs == 0);
*enc_len = total_len;
*authbuf = aadbuf;
*auth_len = aad_len;
if (encrypt) {
puio->uio_iov = src_iovecs;
puio->uio_iovcnt = nr_src;
cuio->uio_iov = dst_iovecs;
cuio->uio_iovcnt = nr_dst;
} else {
puio->uio_iov = dst_iovecs;
puio->uio_iovcnt = nr_dst;
cuio->uio_iov = src_iovecs;
cuio->uio_iovcnt = nr_src;
}
return (0);
error:
zio_buf_free(aadbuf, datalen);
if (src_iovecs != NULL)
kmem_free(src_iovecs, nr_src * sizeof (iovec_t));
if (dst_iovecs != NULL)
kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t));
*enc_len = 0;
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
puio->uio_iov = NULL;
puio->uio_iovcnt = 0;
cuio->uio_iov = NULL;
cuio->uio_iovcnt = 0;
return (ret);
}
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio,
uint_t *enc_len)
{
+ (void) encrypt;
int ret;
uint_t nr_plain = 1, nr_cipher = 2;
iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
/* allocate the iovecs for the plain and cipher data */
plain_iovecs = kmem_alloc(nr_plain * sizeof (iovec_t),
KM_SLEEP);
if (!plain_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t),
KM_SLEEP);
if (!cipher_iovecs) {
ret = SET_ERROR(ENOMEM);
goto error;
}
plain_iovecs[0].iov_base = plainbuf;
plain_iovecs[0].iov_len = datalen;
cipher_iovecs[0].iov_base = cipherbuf;
cipher_iovecs[0].iov_len = datalen;
*enc_len = datalen;
puio->uio_iov = plain_iovecs;
puio->uio_iovcnt = nr_plain;
cuio->uio_iov = cipher_iovecs;
cuio->uio_iovcnt = nr_cipher;
return (0);
error:
if (plain_iovecs != NULL)
kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
if (cipher_iovecs != NULL)
kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
*enc_len = 0;
puio->uio_iov = NULL;
puio->uio_iovcnt = 0;
cuio->uio_iov = NULL;
cuio->uio_iovcnt = 0;
return (ret);
}
/*
* This function builds up the plaintext (puio) and ciphertext (cuio) uios so
* that they can be used for encryption and decryption by zio_do_crypt_uio().
* Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
* requiring special handling to parse out pieces that are to be encrypted. The
* authbuf is used by these special cases to store additional authenticated
* data (AAD) for the encryption modes.
*/
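/*
 * Note (derived from the code below): every type-specific helper leaves
 * the final cuio iovec free so that it can be pointed at the caller's
 * mac buffer (ZIO_DATA_MAC_LEN bytes) once the helper returns.
 */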
static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{
int ret;
iovec_t *mac_iov;
ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);
/* route to handler */
switch (ot) {
case DMU_OT_INTENT_LOG:
ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
no_crypt);
break;
case DMU_OT_DNODE:
ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
auth_len, no_crypt);
break;
default:
ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
datalen, puio, cuio, enc_len);
*authbuf = NULL;
*auth_len = 0;
*no_crypt = B_FALSE;
break;
}
if (ret != 0)
goto error;
/* populate the uios */
puio->uio_segflg = UIO_SYSSPACE;
cuio->uio_segflg = UIO_SYSSPACE;
mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]);
mac_iov->iov_base = mac;
mac_iov->iov_len = ZIO_DATA_MAC_LEN;
return (0);
error:
return (ret);
}
/*
* Primary encryption / decryption entrypoint for zio data.
*/
int
zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
boolean_t *no_crypt)
{
int ret;
boolean_t locked = B_FALSE;
uint64_t crypt = key->zk_crypt;
uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
uint_t enc_len, auth_len;
zfs_uio_t puio, cuio;
uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
crypto_key_t tmp_ckey, *ckey = NULL;
crypto_ctx_template_t tmpl;
uint8_t *authbuf = NULL;
/*
* If the needed key is the current one, just use it. Otherwise we
* need to generate a temporary one from the given salt + master key.
* If we are encrypting, we must return a copy of the current salt
* so that it can be stored in the blkptr_t.
*/
rw_enter(&key->zk_salt_lock, RW_READER);
locked = B_TRUE;
if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
ckey = &key->zk_current_key;
tmpl = key->zk_current_tmpl;
} else {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
if (ret != 0)
goto error;
tmp_ckey.ck_format = CRYPTO_KEY_RAW;
tmp_ckey.ck_data = enc_keydata;
tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
ckey = &tmp_ckey;
tmpl = NULL;
}
/*
* Attempt to use QAT acceleration if we can. We currently don't
* do this for metadnode and ZIL blocks, since they have a much
* more involved buffer layout and the qat_crypt() function only
* works in-place.
*/
if (qat_crypt_use_accel(datalen) &&
ot != DMU_OT_INTENT_LOG && ot != DMU_OT_DNODE) {
uint8_t *srcbuf, *dstbuf;
if (encrypt) {
srcbuf = plainbuf;
dstbuf = cipherbuf;
} else {
srcbuf = cipherbuf;
dstbuf = plainbuf;
}
ret = qat_crypt((encrypt) ? QAT_ENCRYPT : QAT_DECRYPT, srcbuf,
dstbuf, NULL, 0, iv, mac, ckey, key->zk_crypt, datalen);
if (ret == CPA_STATUS_SUCCESS) {
if (locked) {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
}
return (0);
}
/* If the hardware implementation fails, fall back to software */
}
bzero(&puio, sizeof (zfs_uio_t));
bzero(&cuio, sizeof (zfs_uio_t));
/* create uios for encryption */
ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
&authbuf, &auth_len, no_crypt);
if (ret != 0)
goto error;
/* perform the encryption / decryption in software */
ret = zio_do_crypt_uio(encrypt, key->zk_crypt, ckey, tmpl, iv, enc_len,
&puio, &cuio, authbuf, auth_len);
if (ret != 0)
goto error;
if (locked) {
rw_exit(&key->zk_salt_lock);
locked = B_FALSE;
}
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (0);
error:
if (locked)
rw_exit(&key->zk_salt_lock);
if (authbuf != NULL)
zio_buf_free(authbuf, datalen);
if (ckey == &tmp_ckey)
bzero(enc_keydata, keydata_len);
zio_crypt_destroy_uio(&puio);
zio_crypt_destroy_uio(&cuio);
return (ret);
}
/*
* Simple wrapper around zio_do_crypt_data() to work with abd's instead of
* linear buffers.
*/
int
zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
{
int ret;
void *ptmp, *ctmp;
if (encrypt) {
ptmp = abd_borrow_buf_copy(pabd, datalen);
ctmp = abd_borrow_buf(cabd, datalen);
} else {
ptmp = abd_borrow_buf(pabd, datalen);
ctmp = abd_borrow_buf_copy(cabd, datalen);
}
ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
datalen, ptmp, ctmp, no_crypt);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (0);
error:
if (encrypt) {
abd_return_buf(pabd, ptmp, datalen);
abd_return_buf_copy(cabd, ctmp, datalen);
} else {
abd_return_buf_copy(pabd, ptmp, datalen);
abd_return_buf(cabd, ctmp, datalen);
}
return (ret);
}
#if defined(_KERNEL)
/* BEGIN CSTYLED */
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
"can be used for generating encryption keys before it is rotated");
/* END CSTYLED */
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
index 7e88eae33711..f1241c443797 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_file.c
@@ -1,1089 +1,1094 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
#include <linux/pagemap.h>
#endif
/*
* When using fallocate(2) to preallocate space, inflate the requested
* capacity check by 10% to account for the required metadata blocks.
*/
unsigned int zfs_fallocate_reserve_percent = 110;
static int
zpl_open(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
error = generic_file_open(ip, filp);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_release(struct inode *ip, struct file *filp)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
if (ITOZ(ip)->z_atime_dirty)
zfs_mark_inode_dirty(ip);
crhold(cr);
error = -zfs_close(ip, filp->f_flags, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_readdir(file_inode(filp), ctx, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
zpl_dir_context_t ctx =
ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
int error;
error = zpl_iterate(filp, &ctx);
filp->f_pos = ctx.pos;
return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */
#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
* Linux 2.6.35 - 3.0 API,
* As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
* redundant. The dentry is still accessible via filp->f_path.dentry,
* and we are guaranteed that filp will never be NULL.
*/
static int
zpl_fsync(struct file *filp, int datasync)
{
struct inode *inode = filp->f_mapping->host;
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(ITOZ(inode), datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif
#elif defined(HAVE_FSYNC_RANGE)
/*
* Linux 3.1 API,
* As of 3.1 the responsibility to call filemap_write_and_wait_range() has
* been pushed down into the .fsync() vfs hook. Additionally, the i_mutex
* lock is no longer held by the caller; since zfs does not require the
* lock to be held, we don't acquire it.
*/
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
struct inode *inode = filp->f_mapping->host;
cred_t *cr = CRED();
int error;
fstrans_cookie_t cookie;
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (error)
return (error);
crhold(cr);
cookie = spl_fstrans_mark();
error = -zfs_fsync(ITOZ(inode), datasync, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif
#else
#error "Unsupported fops->fsync() implementation"
#endif
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
int flags = 0;
#if defined(IOCB_DSYNC)
if (kiocb->ki_flags & IOCB_DSYNC)
flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
if (kiocb->ki_flags & IOCB_SYNC)
flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
if (kiocb->ki_flags & IOCB_APPEND)
flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
if (kiocb->ki_flags & IOCB_DIRECT)
flags |= O_DIRECT;
#endif
return (flags);
}
/*
* If relatime is enabled, call file_accessed() if zfs_relatime_need_update()
* is true. This is needed since datasets with an inherited "relatime"
* property aren't necessarily mounted with the MNT_RELATIME flag (e.g.
* after `zfs set relatime=...`), which is what the relatime test in the
* VFS via relatime_need_update() is based on.
*/
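/*
 * Illustrative behavior (assumption based on the standard Linux
 * relatime policy; zfs_relatime_need_update() is authoritative): with
 * relatime in effect, atime is refreshed only when it is older than
 * mtime or ctime, or older than roughly a day, so most reads skip the
 * file_accessed() call below.
 */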
static inline void
zpl_file_accessed(struct file *filp)
{
struct inode *ip = filp->f_mapping->host;
if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
if (zfs_relatime_need_update(ip))
file_accessed(filp);
} else {
file_accessed(filp);
}
}
#if defined(HAVE_VFS_RW_ITERATE)
/*
* When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
* iovecs, kvecs, bvecs and pipes, plus all the required interfaces to
* manipulate the iov_iter are available. In which case the full iov_iter
* can be attached to the uio and correctly handled in the lower layers.
* Otherwise, for older kernels extract the iovec and pass it instead.
*/
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
#ifdef HAVE_IOV_ITER_TYPE
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#else
zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
count, skip);
#endif
#endif
}
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
ssize_t count = iov_iter_count(to);
zfs_uio_t uio;
zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
ssize_t ret = generic_write_checks(kiocb, from);
if (ret <= 0)
return (ret);
*countp = ret;
#else
struct file *file = kiocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *ip = mapping->host;
int isblk = S_ISBLK(ip->i_mode);
*countp = iov_iter_count(from);
ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
if (ret)
return (ret);
#endif
return (0);
}
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
zfs_uio_t uio;
size_t count = 0;
ssize_t ret;
ret = zpl_generic_write_checks(kiocb, from, &count);
if (ret)
return (ret);
zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#else /* !HAVE_VFS_RW_ITERATE */
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (ret)
return (ret);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t read = count - uio.uio_resid;
kiocb->ki_pos += read;
zpl_file_accessed(filp);
return (read);
}
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
cred_t *cr = CRED();
fstrans_cookie_t cookie;
struct file *filp = kiocb->ki_filp;
struct inode *ip = filp->f_mapping->host;
size_t count;
ssize_t ret;
ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
if (ret)
return (ret);
ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
if (ret)
return (ret);
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
count, 0);
crhold(cr);
cookie = spl_fstrans_mark();
int error = -zfs_write(ITOZ(ip), &uio,
filp->f_flags | zfs_io_flags(kiocb), cr);
spl_fstrans_unmark(cookie);
crfree(cr);
if (error < 0)
return (error);
ssize_t wrote = count - uio.uio_resid;
kiocb->ki_pos += wrote;
return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
if (rw == WRITE)
return (zpl_iter_write(kiocb, iter));
else
return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
ASSERT3S(pos, ==, kiocb->ki_pos);
return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif
#else /* HAVE_VFS_RW_ITERATE */
#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
loff_t pos, unsigned long nr_segs)
{
if (rw == WRITE)
return (zpl_aio_write(kiocb, iov, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
const struct iovec *iovp = iov_iter_iovec(iter);
unsigned long nr_segs = iter->nr_segs;
ASSERT3S(pos, ==, kiocb->ki_pos);
if (rw == WRITE)
return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
else
return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif
#endif /* HAVE_VFS_RW_ITERATE */
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
fstrans_cookie_t cookie;
if (whence == SEEK_DATA || whence == SEEK_HOLE) {
struct inode *ip = filp->f_mapping->host;
loff_t maxbytes = ip->i_sb->s_maxbytes;
loff_t error;
spl_inode_lock_shared(ip);
cookie = spl_fstrans_mark();
error = -zfs_holey(ITOZ(ip), whence, &offset);
spl_fstrans_unmark(cookie);
if (error == 0)
error = lseek_execute(filp, ip, offset, maxbytes);
spl_inode_unlock_shared(ip);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
return (generic_file_llseek(filp, offset, whence));
}
/*
* It's worth taking a moment to describe how mmap is implemented
* for zfs because it differs considerably from other Linux filesystems.
* However, this issue is handled the same way under OpenSolaris.
*
* The issue is that by design zfs bypasses the Linux page cache and
* leaves all caching up to the ARC. This has been shown to work
* well for the common read(2)/write(2) case. However, mmap(2)
* is a problem because it relies on being tightly integrated with the
* page cache. To handle this we cache mmap'ed files twice, once in
* the ARC and a second time in the page cache. The code is careful
* to keep both copies synchronized.
*
* When a file with an mmap'ed region is written to using write(2)
* both the data in the ARC and existing pages in the page cache
* are updated. For a read(2) data will be read first from the page
* cache, then the ARC if needed. Neither a write(2) nor a read(2) will
* ever result in new pages being added to the page cache.
*
* New pages are added to the page cache only via .readpage() which
* is called when the vfs needs to read a page off disk to back the
* virtual memory region. These pages may be modified without
* notifying the ARC and will be written out periodically via
* .writepage(). This will occur due to either a sync or the usual
* page aging behavior. Note that because a read(2) of an mmap'ed file
* always checks the page cache first, correct data will still be
* returned even when the ARC is out of date.
*
* While this implementation ensures correct behavior, it does have
* some drawbacks. The most obvious is that it increases the required
* memory footprint when accessing mmap'ed files. It also adds
* additional complexity to the code to keep both caches synchronized.
*
* Longer term it may be possible to cleanly resolve this wart by
* mapping page cache pages directly on to the ARC buffers. The
* Linux address space operations are flexible enough to allow
* selection of which pages back a particular index. The trick
* would be working out the details of which subsystem is in
* charge, the ARC, the page cache, or both. It may also prove
* helpful to move the ARC buffers to scatter-gather lists
* rather than a vmalloc'ed region.
*/
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct inode *ip = filp->f_mapping->host;
znode_t *zp = ITOZ(ip);
int error;
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
(size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
spl_fstrans_unmark(cookie);
if (error)
return (error);
error = generic_file_mmap(filp, vma);
if (error)
return (error);
mutex_enter(&zp->z_lock);
zp->z_is_mapped = B_TRUE;
mutex_exit(&zp->z_lock);
return (error);
}
/*
* Populate a page with data for the Linux page cache. This function is
* only used to support mmap(2). There will be an identical copy of the
* data in the ARC which is kept up to date via .write() and .writepage().
*/
static inline int
zpl_readpage_common(struct page *pp)
{
struct inode *ip;
struct page *pl[1];
int error = 0;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ip = pp->mapping->host;
pl[0] = pp;
cookie = spl_fstrans_mark();
error = -zfs_getpage(ip, pl, 1);
spl_fstrans_unmark(cookie);
if (error) {
SetPageError(pp);
ClearPageUptodate(pp);
} else {
ClearPageError(pp);
SetPageUptodate(pp);
flush_dcache_page(pp);
}
unlock_page(pp);
return (error);
}
static int
zpl_readpage(struct file *filp, struct page *pp)
{
return (zpl_readpage_common(pp));
}
static int
zpl_readpage_filler(void *data, struct page *pp)
{
return (zpl_readpage_common(pp));
}
/*
* Populate a set of pages with data for the Linux page cache. This
* function will only be called for read ahead and never for demand
* paging. For simplicity, the code relies on read_cache_pages() to
* correctly lock each page for IO and call zpl_readpage().
*/
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;
fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
cookie = spl_fstrans_mark();
(void) zfs_putpage(mapping->host, pp, wbc);
spl_fstrans_unmark(cookie);
return (0);
}
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(mapping->host);
zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
enum writeback_sync_modes sync_mode;
int result;
ZPL_ENTER(zfsvfs);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
ZPL_EXIT(zfsvfs);
sync_mode = wbc->sync_mode;
/*
* We don't want to run write_cache_pages() in SYNC mode here, because
* that would make putpage() wait for a single page to be committed to
* disk every single time, resulting in atrocious performance. Instead
* we run it once in non-SYNC mode so that the ZIL gets all the data,
* and then we commit it all in one go.
*/
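/*
 * Illustrative sequence for a WB_SYNC_ALL request (or sync=always):
 * 1) write_cache_pages() runs in WB_SYNC_NONE, feeding dirty pages to
 *    the ZIL via zpl_putpage() without waiting on each one;
 * 2) a single zil_commit() makes all of them stable;
 * 3) write_cache_pages() runs again with the original sync mode to
 *    pick up anything the first pass skipped.
 */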
wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
if (sync_mode != wbc->sync_mode) {
ZPL_ENTER(zfsvfs);
ZPL_VERIFY_ZP(zp);
if (zfsvfs->z_log != NULL)
zil_commit(zfsvfs->z_log, zp->z_id);
ZPL_EXIT(zfsvfs);
/*
* We need to call write_cache_pages() again (we can't just
* return after the commit) because the previous call in
* non-SYNC mode does not guarantee that we got all the dirty
* pages (see the implementation of write_cache_pages() for
* details). That being said, this is a no-op in most cases.
*/
wbc->sync_mode = sync_mode;
result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
}
return (result);
}
/*
* Write out dirty pages to the ARC; this function is only required to
* support mmap(2). Mapped pages may be dirtied by memory operations
* which never call .write(). These dirty pages are kept in sync with
* the ARC buffers via this hook.
*/
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
wbc->sync_mode = WB_SYNC_ALL;
return (zpl_putpage(pp, wbc, pp->mapping));
}
/*
* The flag combination which matches the behavior of zfs_space() is
* FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
* flag was introduced in the 2.6.38 kernel.
*
* The original mode=0 (allocate space) behavior can be reasonably emulated
* by checking if enough space exists and creating a sparse file, as real
* persistent space reservation is not possible due to COW, snapshots, etc.
*/
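/*
 * Illustrative call from user space (fd is a hypothetical descriptor):
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len)
 * is routed to the hole-punching branch below, while a plain
 * fallocate(fd, 0, off, len) takes the capacity-check path that
 * emulates preallocation.
 */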
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
cred_t *cr = CRED();
loff_t olen;
fstrans_cookie_t cookie;
int error = 0;
- if ((mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) != 0)
+ int test_mode = FALLOC_FL_PUNCH_HOLE;
+#ifdef HAVE_FALLOC_FL_ZERO_RANGE
+ test_mode |= FALLOC_FL_ZERO_RANGE;
+#endif
+
+ if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
return (-EOPNOTSUPP);
if (offset < 0 || len <= 0)
return (-EINVAL);
spl_inode_lock(ip);
olen = i_size_read(ip);
crhold(cr);
cookie = spl_fstrans_mark();
- if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (mode & (test_mode)) {
flock64_t bf;
if (offset > olen)
goto out_unmark;
if (offset + len > olen)
len = olen - offset;
bf.l_type = F_WRLCK;
bf.l_whence = SEEK_SET;
bf.l_start = offset;
bf.l_len = len;
bf.l_pid = 0;
error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
unsigned int percent = zfs_fallocate_reserve_percent;
struct kstatfs statfs;
/* Legacy mode, disable fallocate compatibility. */
if (percent == 0) {
error = -EOPNOTSUPP;
goto out_unmark;
}
/*
* Use zfs_statvfs() instead of dmu_objset_space() since it
* also checks project quota limits, which are relevant here.
*/
error = zfs_statvfs(ip, &statfs);
if (error)
goto out_unmark;
/*
* Shrink available space a bit to account for overhead/races.
* We know the product previously fit into availbytes from
* dmu_objset_space(), so the smaller product will also fit.
*/
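/*
 * Worked example (illustrative values): with the default
 * zfs_fallocate_reserve_percent of 110 and f_bsize = 4096,
 * f_bsize * 100 / percent is 3723, so a request fails with ENOSPC
 * once len exceeds roughly 90% of f_bavail * f_bsize.
 */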
if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
error = -ENOSPC;
goto out_unmark;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
}
out_unmark:
spl_fstrans_unmark(cookie);
spl_inode_unlock(ip);
crfree(cr);
return (error);
}
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
return zpl_fallocate_common(file_inode(filp),
mode, offset, len);
}
#define ZFS_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define ZFS_FL_USER_MODIFIABLE (FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)
static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
uint32_t ioctl_flags = 0;
if (zfs_flags & ZFS_IMMUTABLE)
ioctl_flags |= FS_IMMUTABLE_FL;
if (zfs_flags & ZFS_APPENDONLY)
ioctl_flags |= FS_APPEND_FL;
if (zfs_flags & ZFS_NODUMP)
ioctl_flags |= FS_NODUMP_FL;
if (zfs_flags & ZFS_PROJINHERIT)
ioctl_flags |= ZFS_PROJINHERIT_FL;
return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}
/*
* Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
* attributes common to both Linux and Solaris are mapped.
*/
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
uint32_t flags;
int err;
flags = __zpl_ioctl_getflags(file_inode(filp));
err = copy_to_user(arg, &flags, sizeof (flags));
return (err);
}
/*
* fchange() is a helper macro to detect if we have been asked to change a
* flag. This is ugly, but the requirement that we do this is a consequence of
* how the Linux file attribute interface was designed. Another consequence is
* that concurrent modification of files suffers from a TOCTOU race. Neither
* are things we can fix without modifying the kernel-userland interface, which
* is outside of our jurisdiction.
*/
#define fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
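/*
 * Worked example (illustrative): if ioctl_flags contains FS_IMMUTABLE_FL
 * while zfs_flags lacks ZFS_IMMUTABLE, then
 * fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE)
 * evaluates !(nonzero) != !(0), i.e. 0 != 1, which is true: the caller
 * is asking to change the immutable bit, so CAP_LINUX_IMMUTABLE is
 * required by the check below.
 */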
static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
uint64_t zfs_flags = ITOZ(ip)->z_pflags;
xoptattr_t *xoap;
if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
ZFS_PROJINHERIT_FL))
return (-EOPNOTSUPP);
if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
return (-EACCES);
if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
!capable(CAP_LINUX_IMMUTABLE))
return (-EPERM);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EACCES);
xva_init(xva);
xoap = xva_getxoptattr(xva);
XVA_SET_REQ(xva, XAT_IMMUTABLE);
if (ioctl_flags & FS_IMMUTABLE_FL)
xoap->xoa_immutable = B_TRUE;
XVA_SET_REQ(xva, XAT_APPENDONLY);
if (ioctl_flags & FS_APPEND_FL)
xoap->xoa_appendonly = B_TRUE;
XVA_SET_REQ(xva, XAT_NODUMP);
if (ioctl_flags & FS_NODUMP_FL)
xoap->xoa_nodump = B_TRUE;
XVA_SET_REQ(xva, XAT_PROJINHERIT);
if (ioctl_flags & ZFS_PROJINHERIT_FL)
xoap->xoa_projinherit = B_TRUE;
return (0);
}
static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
uint32_t flags;
cred_t *cr = CRED();
xvattr_t xva;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&flags, arg, sizeof (flags)))
return (-EFAULT);
err = __zpl_ioctl_setflags(ip, flags, &xva);
if (err)
return (err);
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
zfsxattr_t fsx = { 0 };
struct inode *ip = file_inode(filp);
int err;
fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
fsx.fsx_projid = ITOZ(ip)->z_projid;
err = copy_to_user(arg, &fsx, sizeof (fsx));
return (err);
}
static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
struct inode *ip = file_inode(filp);
zfsxattr_t fsx;
cred_t *cr = CRED();
xvattr_t xva;
xoptattr_t *xoap;
int err;
fstrans_cookie_t cookie;
if (copy_from_user(&fsx, arg, sizeof (fsx)))
return (-EFAULT);
if (!zpl_is_valid_projid(fsx.fsx_projid))
return (-EINVAL);
err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
if (err)
return (err);
xoap = xva_getxoptattr(&xva);
XVA_SET_REQ(&xva, XAT_PROJID);
xoap->xoa_projid = fsx.fsx_projid;
crhold(cr);
cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
spl_fstrans_unmark(cookie);
crfree(cr);
return (err);
}
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC_GETFLAGS:
return (zpl_ioctl_getflags(filp, (void *)arg));
case FS_IOC_SETFLAGS:
return (zpl_ioctl_setflags(filp, (void *)arg));
case ZFS_IOC_FSGETXATTR:
return (zpl_ioctl_getxattr(filp, (void *)arg));
case ZFS_IOC_FSSETXATTR:
return (zpl_ioctl_setxattr(filp, (void *)arg));
default:
return (-ENOTTY);
}
}
#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
break;
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
default:
return (-ENOTTY);
}
return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */
const struct address_space_operations zpl_address_space_operations = {
.readpages = zpl_readpages,
.readpage = zpl_readpage,
.writepage = zpl_writepage,
.writepages = zpl_writepages,
.direct_IO = zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
.set_page_dirty = __set_page_dirty_nobuffers,
#endif
};
const struct file_operations zpl_file_operations = {
.open = zpl_open,
.release = zpl_release,
.llseek = zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
.read = new_sync_read,
.write = new_sync_write,
#endif
.read_iter = zpl_iter_read,
.write_iter = zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
#endif
#else
.read = do_sync_read,
.write = do_sync_write,
.aio_read = zpl_aio_read,
.aio_write = zpl_aio_write,
#endif
.mmap = zpl_mmap,
.fsync = zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
.aio_fsync = zpl_aio_fsync,
#endif
.fallocate = zpl_fallocate,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
const struct file_operations zpl_dir_file_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
.iterate_shared = zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
.iterate = zpl_iterate,
#else
.readdir = zpl_readdir,
#endif
.fsync = zpl_fsync,
.unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = zpl_compat_ioctl,
#endif
};
/* BEGIN CSTYLED */
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
"Percentage of length to use for the available capacity check");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index 44caadd587f7..7a979eb91e7f 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -1,1231 +1,1230 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_request_sync = 0;
unsigned int zvol_prefetch_bytes = (128 * 1024);
unsigned long zvol_max_discard_blocks = 16384;
unsigned int zvol_threads = 32;
unsigned int zvol_open_timeout_ms = 1000;
struct zvol_state_os {
struct gendisk *zvo_disk; /* generic disk */
struct request_queue *zvo_queue; /* request queue */
dev_t zvo_dev; /* device id */
};
taskq_t *zvol_taskq;
static struct ida zvol_ida;
typedef struct zv_request_stack {
zvol_state_t *zv;
struct bio *bio;
} zv_request_t;
typedef struct zv_request_task {
zv_request_t zvr;
taskq_ent_t ent;
} zv_request_task_t;
static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
zv_request_task_t *task;
task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
taskq_init_ent(&task->ent);
task->zvr = zvr;
return (task);
}
static void
zv_request_task_free(zv_request_task_t *task)
{
kmem_free(task, sizeof (*task));
}
/*
* Given a path, return TRUE if path is a ZVOL.
*/
static boolean_t
zvol_is_zvol_impl(const char *path)
{
dev_t dev = 0;
if (vdev_lookup_bdev(path, &dev) != 0)
return (B_FALSE);
if (MAJOR(dev) == zvol_major)
return (B_TRUE);
return (B_FALSE);
}
static void
zvol_write(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
int error = 0;
zfs_uio_t uio;
zfs_uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
/* A bio marked as FLUSH needs to flush before the write */
if (bio_is_flush(bio))
zil_commit(zv->zv_zilog, ZVOL_OBJ);
/* Some requests are just for flush and nothing else. */
if (uio.uio_resid == 0) {
rw_exit(&zv->zv_suspend_lock);
BIO_END_IO(bio, 0);
return;
}
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
unsigned long start_time;
boolean_t acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
boolean_t sync =
bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_WRITER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
uint64_t off = uio.uio_loffset;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
if (bytes > volsize - off) /* don't write past the end */
bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
/* This will only fail for ENOSPC */
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
break;
}
error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
if (error == 0) {
zvol_log_write(zv, tx, off, bytes, sync);
}
dmu_tx_commit(tx);
if (error)
break;
}
zfs_rangelock_exit(lr);
int64_t nwritten = start_resid - uio.uio_resid;
dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
task_io_account_write(nwritten);
if (sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
rw_exit(&zv->zv_suspend_lock);
if (acct)
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
BIO_END_IO(bio, -error);
}
static void
zvol_write_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_write(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_discard(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
zvol_state_t *zv = zvr->zv;
uint64_t start = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
uint64_t end = start + size;
boolean_t sync;
int error = 0;
dmu_tx_t *tx;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
ASSERT3P(zv->zv_zilog, !=, NULL);
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
unsigned long start_time;
boolean_t acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);
sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
if (end > zv->zv_volsize) {
error = SET_ERROR(EIO);
goto unlock;
}
/*
* Align the request to volume block boundaries when a secure erase is
* not required. This will prevent dnode_free_range() from zeroing out
* the unaligned parts, which is slow (read-modify-write) and useless
* since we are not freeing any space by doing so.
*/
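/*
 * Illustrative example: with zv_volblocksize = 8192, a discard of
 * bytes [4096, 20480) is trimmed to [8192, 16384); the partial blocks
 * at either end are left untouched.
 */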
if (!bio_is_secure_erase(bio)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
size = end - start;
}
if (start >= end)
goto unlock;
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, start, size, B_TRUE);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset,
ZVOL_OBJ, start, size);
}
zfs_rangelock_exit(lr);
if (error == 0 && sync)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
unlock:
rw_exit(&zv->zv_suspend_lock);
if (acct)
blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
BIO_END_IO(bio, -error);
}
static void
zvol_discard_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_discard(&task->zvr);
zv_request_task_free(task);
}
static void
zvol_read(zv_request_t *zvr)
{
struct bio *bio = zvr->bio;
int error = 0;
zfs_uio_t uio;
zfs_uio_bvec_init(&uio, bio);
zvol_state_t *zv = zvr->zv;
ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
struct request_queue *q = zv->zv_zso->zvo_queue;
struct gendisk *disk = zv->zv_zso->zvo_disk;
ssize_t start_resid = uio.uio_resid;
unsigned long start_time;
boolean_t acct = blk_queue_io_stat(q);
if (acct)
start_time = blk_generic_start_io_acct(q, disk, READ, bio);
zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
uio.uio_loffset, uio.uio_resid, RL_READER);
uint64_t volsize = zv->zv_volsize;
while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
/* don't read past the end */
if (bytes > volsize - uio.uio_loffset)
bytes = volsize - uio.uio_loffset;
error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
}
zfs_rangelock_exit(lr);
int64_t nread = start_resid - uio.uio_resid;
dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
task_io_account_read(nread);
rw_exit(&zv->zv_suspend_lock);
if (acct)
blk_generic_end_io_acct(q, disk, READ, bio, start_time);
BIO_END_IO(bio, -error);
}
static void
zvol_read_task(void *arg)
{
zv_request_task_t *task = arg;
zvol_read(&task->zvr);
zv_request_task_free(task);
}
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
static void
zvol_submit_bio(struct bio *bio)
#else
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#endif
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
zvol_state_t *zv = q->queuedata;
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
int rw = bio_data_dir(bio);
if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
printk(KERN_INFO
"%s: bad access: offset=%llu, size=%lu\n",
zv->zv_zso->zvo_disk->disk_name,
(long long unsigned)offset,
(long unsigned)size);
BIO_END_IO(bio, -SET_ERROR(EIO));
goto out;
}
zv_request_t zvr = {
.zv = zv,
.bio = bio,
};
zv_request_task_t *task;
if (rw == WRITE) {
if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
BIO_END_IO(bio, -SET_ERROR(EROFS));
goto out;
}
/*
* Prevents the zvol from being suspended, or the ZIL being
* concurrently opened. Will be released after the i/o
* completes.
*/
rw_enter(&zv->zv_suspend_lock, RW_READER);
/*
* Open a ZIL if this is the first time we have written to this
* zvol. We protect zv->zv_zilog with zv_suspend_lock rather
* than zv_state_lock so that we don't need to acquire an
* additional lock in this path.
*/
if (zv->zv_zilog == NULL) {
rw_exit(&zv->zv_suspend_lock);
rw_enter(&zv->zv_suspend_lock, RW_WRITER);
if (zv->zv_zilog == NULL) {
zv->zv_zilog = zil_open(zv->zv_objset,
zvol_get_data);
zv->zv_flags |= ZVOL_WRITTEN_TO;
/* replay / destroy done in zvol_create_minor */
VERIFY0((zv->zv_zilog->zl_header->zh_flags &
ZIL_REPLAY_NEEDED));
}
rw_downgrade(&zv->zv_suspend_lock);
}
/*
* We don't want this thread to be blocked waiting for i/o to
* complete, so we instead wait from a taskq callback. The
* i/o may be a ZIL write (via zil_commit()), or a read of an
* indirect block, or a read of a data block (if this is a
* partial-block write). We will indicate that the i/o is
* complete by calling BIO_END_IO() from the taskq callback.
*
* This design allows the calling thread to continue and
* initiate more concurrent operations by calling
* zvol_request() again. There are typically only a small
* number of threads available to call zvol_request() (e.g.
* one per iSCSI target), so keeping the latency of
* zvol_request() low is important for performance.
*
* The zvol_request_sync module parameter allows this
* behavior to be altered, for performance evaluation
* purposes. If the callback blocks, setting
* zvol_request_sync=1 will result in much worse performance.
*
* We can have up to zvol_threads concurrent i/o's being
* processed for all zvols on the system. This is typically
* a vast improvement over the zvol_request_sync=1 behavior
* of one i/o at a time per zvol. However, an even better
* design would be for zvol_request() to initiate the zio
* directly, and then be notified by the zio_done callback,
* which would call BIO_END_IO(). Unfortunately, the DMU/ZIL
* interfaces lack this functionality (they block waiting for
* the i/o to complete).
*/
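/*
 * Illustrative defaults from the declarations near the top of this
 * file: zvol_request_sync = 0 and zvol_threads = 32, so by default the
 * dispatches below never block the submitting thread and up to 32
 * zvol I/Os are processed concurrently across all zvols.
 */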
if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
if (zvol_request_sync) {
zvol_discard(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_discard_task, task, 0, &task->ent);
}
} else {
if (zvol_request_sync) {
zvol_write(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_write_task, task, 0, &task->ent);
}
}
} else {
/*
* The SCST driver, and possibly others, may issue READ I/Os
* with a length of zero bytes. These empty I/Os contain no
* data and require no additional handling.
*/
if (size == 0) {
BIO_END_IO(bio, 0);
goto out;
}
rw_enter(&zv->zv_suspend_lock, RW_READER);
/* See comment in WRITE case above. */
if (zvol_request_sync) {
zvol_read(&zvr);
} else {
task = zv_request_task_create(zvr);
taskq_dispatch_ent(zvol_taskq,
zvol_read_task, task, 0, &task->ent);
}
}
out:
spl_fstrans_unmark(cookie);
#if (defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)) && \
!defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
return (BLK_QC_T_NONE);
#endif
}
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
zvol_state_t *zv;
int error = 0;
- boolean_t drop_suspend = B_TRUE;
- boolean_t drop_namespace = B_FALSE;
+ boolean_t drop_suspend = B_FALSE;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
hrtime_t start = gethrtime();
retry:
#endif
rw_enter(&zvol_state_lock, RW_READER);
/*
* Obtain a copy of private_data under the zvol_state_lock to make
* sure that either the result of the zvol free code path setting
* bdev->bd_disk->private_data to NULL is observed, or zvol_free()
* is not called on this zv because of the positive zv_open_count.
*/
zv = bdev->bd_disk->private_data;
if (zv == NULL) {
rw_exit(&zvol_state_lock);
return (SET_ERROR(-ENXIO));
}
- if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
+ mutex_enter(&zv->zv_state_lock);
+ /*
+ * Make sure zvol is not suspended during first open
+ * (hold zv_suspend_lock) and respect proper lock acquisition
+ * ordering - zv_suspend_lock before zv_state_lock
+ */
+ if (zv->zv_open_count == 0) {
+ if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_enter(&zv->zv_suspend_lock, RW_READER);
+ mutex_enter(&zv->zv_state_lock);
+ /* check to see if zv_suspend_lock is needed */
+ if (zv->zv_open_count != 0) {
+ rw_exit(&zv->zv_suspend_lock);
+ } else {
+ drop_suspend = B_TRUE;
+ }
+ } else {
+ drop_suspend = B_TRUE;
+ }
+ }
+ rw_exit(&zvol_state_lock);
+
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+
+ if (zv->zv_open_count == 0) {
+ boolean_t drop_namespace = B_FALSE;
+
+ ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
+
/*
* In all other call paths the spa_namespace_lock is taken
* before the bdev->bd_mutex lock. However, on open(2)
* the __blkdev_get() function calls fops->open() with the
* bdev->bd_mutex lock held. This can result in a deadlock
* when zvols from one pool are used as vdevs in another.
*
* To prevent a lock inversion deadlock we preemptively
* take the spa_namespace_lock. Normally the lock will not
* be contended and this is safe because spa_open_common()
* handles the case where the caller already holds the
* spa_namespace_lock.
*
* When the lock cannot be acquired after multiple retries
* this must be the vdev-on-zvol deadlock case and we have
* no choice but to return an error. For 5.12 and older
* kernels returning -ERESTARTSYS will result in the
* bdev->bd_mutex being dropped, then reacquired, and
* fops->open() being called again. This process can be
* repeated safely until both locks are acquired. For 5.13
* and newer the -ERESTARTSYS retry logic was removed from
* the kernel, so the only option is to return the error for
* the caller to handle.
*/
- if (!mutex_tryenter(&spa_namespace_lock)) {
- rw_exit(&zvol_state_lock);
+ if (!mutex_owned(&spa_namespace_lock)) {
+ if (!mutex_tryenter(&spa_namespace_lock)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
- schedule();
- return (SET_ERROR(-ERESTARTSYS));
-#else
- if ((gethrtime() - start) > timeout)
+ schedule();
return (SET_ERROR(-ERESTARTSYS));
+#else
+ if ((gethrtime() - start) > timeout)
+ return (SET_ERROR(-ERESTARTSYS));
- schedule_timeout(MSEC_TO_TICK(10));
- goto retry;
+ schedule_timeout(MSEC_TO_TICK(10));
+ goto retry;
#endif
- } else {
- drop_namespace = B_TRUE;
- }
- }
-
- mutex_enter(&zv->zv_state_lock);
- /*
- * make sure zvol is not suspended during first open
- * (hold zv_suspend_lock) and respect proper lock acquisition
- * ordering - zv_suspend_lock before zv_state_lock
- */
- if (zv->zv_open_count == 0) {
- if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
- mutex_exit(&zv->zv_state_lock);
- rw_enter(&zv->zv_suspend_lock, RW_READER);
- mutex_enter(&zv->zv_state_lock);
- /* check to see if zv_suspend_lock is needed */
- if (zv->zv_open_count != 0) {
- rw_exit(&zv->zv_suspend_lock);
- drop_suspend = B_FALSE;
+ } else {
+ drop_namespace = B_TRUE;
}
}
- } else {
- drop_suspend = B_FALSE;
- }
- rw_exit(&zvol_state_lock);
- ASSERT(MUTEX_HELD(&zv->zv_state_lock));
-
- if (zv->zv_open_count == 0) {
- ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
- if (error)
- goto out_mutex;
- }
- if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
- error = -EROFS;
- goto out_open_count;
+ if (drop_namespace)
+ mutex_exit(&spa_namespace_lock);
}
- zv->zv_open_count++;
-
- mutex_exit(&zv->zv_state_lock);
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
- if (drop_suspend)
- rw_exit(&zv->zv_suspend_lock);
-
- zfs_check_media_change(bdev);
-
- return (0);
+ if (error == 0) {
+ if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
+ if (zv->zv_open_count == 0)
+ zvol_last_close(zv);
-out_open_count:
- if (zv->zv_open_count == 0)
- zvol_last_close(zv);
+ error = SET_ERROR(-EROFS);
+ } else {
+ zv->zv_open_count++;
+ }
+ }
-out_mutex:
mutex_exit(&zv->zv_state_lock);
- if (drop_namespace)
- mutex_exit(&spa_namespace_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
- return (SET_ERROR(error));
+ if (error == 0)
+ zfs_check_media_change(bdev);
+
+ return (error);
}
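/*
 * Illustrative sketch (generic primitives, not the actual driver code):
 * the "try, back off, retake in the documented order, recheck" idiom used
 * above for zv_suspend_lock vs. zv_state_lock looks roughly like this,
 * where outer/inner/still_needed are placeholders:
 *
 *	if (!rw_tryenter(&outer, RW_READER)) {
 *		mutex_exit(&inner);		// drop inner to avoid inversion
 *		rw_enter(&outer, RW_READER);	// take outer first
 *		mutex_enter(&inner);		// then retake inner
 *		// state may have changed while inner was dropped; recheck
 *		if (!still_needed)
 *			rw_exit(&outer);
 *	}
 */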
static void
zvol_release(struct gendisk *disk, fmode_t mode)
{
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
rw_enter(&zvol_state_lock, RW_READER);
zv = disk->private_data;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
/*
* make sure zvol is not suspended during last close
* (hold zv_suspend_lock) and respect proper lock acquisition
* ordering - zv_suspend_lock before zv_state_lock
*/
if (zv->zv_open_count == 1) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
}
}
} else {
drop_suspend = B_FALSE;
}
rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zv->zv_open_count--;
if (zv->zv_open_count == 0) {
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
zvol_last_close(zv);
}
mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
case BLKFLSBUF:
fsync_bdev(bdev);
invalidate_bdev(bdev);
rw_enter(&zv->zv_suspend_lock, RW_READER);
if (!(zv->zv_flags & ZVOL_RDONLY))
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
rw_exit(&zv->zv_suspend_lock);
break;
case BLKZNAME:
mutex_enter(&zv->zv_state_lock);
error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
mutex_exit(&zv->zv_state_lock);
break;
default:
error = -ENOTTY;
break;
}
return (SET_ERROR(error));
}
#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define zvol_compat_ioctl NULL
#endif
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
zv->zv_changed = 0;
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
rw_enter(&zvol_state_lock, RW_READER);
zvol_state_t *zv = disk->private_data;
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
zv->zv_volsize >> SECTOR_BITS);
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (0);
}
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
struct gendisk *disk = zv->zv_zso->zvo_disk;
#if defined(HAVE_REVALIDATE_DISK_SIZE)
revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
revalidate_disk(disk);
#else
zvol_revalidate_disk(disk);
#endif
return (0);
}
static void
zvol_clear_private(zvol_state_t *zv)
{
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
zv->zv_zso->zvo_disk->private_data = NULL;
}
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
* tiny devices. For devices over 1 MiB a standard head and sector count
* is used to keep the cylinders count reasonable.
*/
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
if (sectors > 2048) {
geo->heads = 16;
geo->sectors = 63;
} else {
geo->heads = 2;
geo->sectors = 4;
}
geo->start = 0;
geo->cylinders = sectors / (geo->heads * geo->sectors);
return (0);
}
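/*
 * Worked example (illustrative): a 1 GiB zvol reports
 * get_capacity() == 2097152 512-byte sectors, so heads = 16,
 * sectors = 63 and cylinders = 2097152 / (16 * 63) = 2080.
 * A 512 KiB zvol (1024 sectors) instead gets heads = 2, sectors = 4
 * and cylinders = 1024 / 8 = 128.
 */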
static struct block_device_operations zvol_ops = {
.open = zvol_open,
.release = zvol_release,
.ioctl = zvol_ioctl,
.compat_ioctl = zvol_compat_ioctl,
.check_events = zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
.revalidate_disk = zvol_revalidate_disk,
#endif
.getgeo = zvol_getgeo,
.owner = THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
.submit_bio = zvol_submit_bio,
#endif
};
/*
* Allocate memory for a new zvol_state_t and setup the required
* request queue and generic disk structures for the block device.
*/
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
zvol_state_t *zv;
struct zvol_state_os *zso;
uint64_t volmode;
if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
return (NULL);
if (volmode == ZFS_VOLMODE_DEFAULT)
volmode = zvol_volmode;
if (volmode == ZFS_VOLMODE_NONE)
return (NULL);
zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
zv->zv_zso = zso;
zv->zv_volmode = volmode;
list_link_init(&zv->zv_next);
mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BLK_ALLOC_DISK
zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
if (zso->zvo_disk == NULL)
goto out_kmem;
zso->zvo_disk->minors = ZVOL_MINORS;
zso->zvo_queue = zso->zvo_disk->queue;
#else
zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
if (zso->zvo_queue == NULL)
goto out_kmem;
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
goto out_kmem;
}
zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_BLK_ALLOC_DISK */
#else
zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
if (zso->zvo_queue == NULL)
goto out_kmem;
zso->zvo_disk = alloc_disk(ZVOL_MINORS);
if (zso->zvo_disk == NULL) {
blk_cleanup_queue(zso->zvo_queue);
goto out_kmem;
}
zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);
/* Limit read-ahead to a single page to prevent over-prefetching. */
blk_queue_set_read_ahead(zso->zvo_queue, 1);
/* Disable write merging in favor of the ZIO pipeline. */
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
/* Enable /proc/diskstats */
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);
zso->zvo_queue->queuedata = zv;
zso->zvo_dev = dev;
zv->zv_open_count = 0;
strlcpy(zv->zv_name, name, MAXNAMELEN);
zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
zso->zvo_disk->major = zvol_major;
zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;
if (volmode == ZFS_VOLMODE_DEV) {
/*
* ZFS_VOLMODE_DEV disables partitioning on ZVOL devices: set
* gendisk->minors = 1 as noted in include/linux/genhd.h.
* Also disable extended partition numbers (GENHD_FL_EXT_DEVT)
* and suppress partition scanning (GENHD_FL_NO_PART_SCAN) by
* setting gendisk->flags accordingly.
*/
zso->zvo_disk->minors = 1;
#if defined(GENHD_FL_EXT_DEVT)
zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
#endif
#if defined(GENHD_FL_NO_PART_SCAN)
zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
#endif
}
zso->zvo_disk->first_minor = (dev & MINORMASK);
zso->zvo_disk->fops = &zvol_ops;
zso->zvo_disk->private_data = zv;
snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
ZVOL_DEV_NAME, (dev & MINORMASK));
return (zv);
out_kmem:
kmem_free(zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
return (NULL);
}
/*
* Cleanup then free a zvol_state_t which was created by zvol_alloc().
* At this time, the structure is not opened by anyone, is taken off
* the zvol_state_list, and has its private data set to NULL.
* The zvol_state_lock is dropped.
*
* This function may take many milliseconds to complete (e.g. we've seen
* it take over 256ms), due to the calls to "blk_cleanup_queue" and
* "del_gendisk". Thus, consumers need to be careful to account for this
* latency when calling this function.
*/
static void
zvol_free(zvol_state_t *zv)
{
ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);
rw_destroy(&zv->zv_suspend_lock);
zfs_rangelock_fini(&zv->zv_rangelock);
del_gendisk(zv->zv_zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
defined(HAVE_BLK_ALLOC_DISK)
blk_cleanup_disk(zv->zv_zso->zvo_disk);
#else
blk_cleanup_queue(zv->zv_zso->zvo_queue);
put_disk(zv->zv_zso->zvo_disk);
#endif
ida_simple_remove(&zvol_ida,
MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
void
zvol_wait_close(zvol_state_t *zv)
{
}
/*
* Create a block device minor node and setup the linkage between it
* and the specified volume. Once this function returns the block
* device is live and ready for use.
*/
static int
zvol_os_create_minor(const char *name)
{
zvol_state_t *zv;
objset_t *os;
dmu_object_info_t *doi;
uint64_t volsize;
uint64_t len;
unsigned minor = 0;
int error = 0;
int idx;
uint64_t hash = zvol_name_hash(name);
if (zvol_inhibit_dev)
return (0);
idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
if (idx < 0)
return (SET_ERROR(-idx));
minor = idx << ZVOL_MINOR_BITS;
zv = zvol_find_by_name_hash(name, hash, RW_NONE);
if (zv) {
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
mutex_exit(&zv->zv_state_lock);
ida_simple_remove(&zvol_ida, idx);
return (SET_ERROR(EEXIST));
}
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
if (error)
goto out_doi;
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error)
goto out_dmu_objset_disown;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
goto out_dmu_objset_disown;
zv = zvol_alloc(MKDEV(zvol_major, minor), name);
if (zv == NULL) {
error = SET_ERROR(EAGAIN);
goto out_dmu_objset_disown;
}
zv->zv_hash = hash;
if (dmu_objset_is_snapshot(os))
zv->zv_flags |= ZVOL_RDONLY;
zv->zv_volblocksize = doi->doi_data_block_size;
zv->zv_volsize = volsize;
zv->zv_objset = os;
set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);
blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
(DMU_MAX_ACCESS / 4) >> 9);
blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
(zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
zv->zv_volblocksize);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#ifdef QUEUE_FLAG_NONROT
blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif
ASSERT3P(zv->zv_zilog, ==, NULL);
zv->zv_zilog = zil_open(os, zvol_get_data);
if (spa_writeable(dmu_objset_spa(os))) {
if (zil_replay_disable)
zil_destroy(zv->zv_zilog, B_FALSE);
else
zil_replay(os, zv, zvol_replay_vector);
}
zil_close(zv->zv_zilog);
zv->zv_zilog = NULL;
ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
/*
* When udev detects the addition of the device it will immediately
* invoke blkid(8) to determine the type of content on the device.
* Prefetching the blocks commonly scanned by blkid(8) will speed
* up this process.
*/
len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
ZIO_PRIORITY_SYNC_READ);
}
zv->zv_objset = NULL;
out_dmu_objset_disown:
dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
kmem_free(doi, sizeof (dmu_object_info_t));
/*
* Keep in mind that once add_disk() is called, the zvol is
* announced to the world, and zvol_open()/zvol_release() can
* be called at any time. Incidentally, add_disk() itself calls
* zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
* directly as well.
*/
if (error == 0) {
rw_enter(&zvol_state_lock, RW_WRITER);
zvol_insert(zv);
rw_exit(&zvol_state_lock);
+#ifdef HAVE_ADD_DISK_RET
+ error = add_disk(zv->zv_zso->zvo_disk);
+#else
add_disk(zv->zv_zso->zvo_disk);
+#endif
} else {
ida_simple_remove(&zvol_ida, idx);
}
return (error);
}
static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
int readonly = get_disk_ro(zv->zv_zso->zvo_disk);
ASSERT(RW_LOCK_HELD(&zvol_state_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
/* move to new hashtable entry */
zv->zv_hash = zvol_name_hash(zv->zv_name);
hlist_del(&zv->zv_hlink);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
/*
* The block device's read-only state is briefly toggled, causing
* a KOBJ_CHANGE uevent to be issued. This ensures udev detects
* the name change and fixes the symlinks. This does not change
* ZVOL_RDONLY in zv->zv_flags, so the actual read-only state never
* changes. This would normally be done using kobject_uevent(), but
* that is a GPL-only symbol, which is why we need this workaround.
*/
set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
set_disk_ro(zv->zv_zso->zvo_disk, readonly);
}
static void
zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
{
set_disk_ro(zv->zv_zso->zvo_disk, flags);
}
static void
zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
{
set_capacity(zv->zv_zso->zvo_disk, capacity);
}
const static zvol_platform_ops_t zvol_linux_ops = {
.zv_free = zvol_free,
.zv_rename_minor = zvol_rename_minor,
.zv_create_minor = zvol_os_create_minor,
.zv_update_volsize = zvol_update_volsize,
.zv_clear_private = zvol_clear_private,
.zv_is_zvol = zvol_is_zvol_impl,
.zv_set_disk_ro = zvol_set_disk_ro_impl,
.zv_set_capacity = zvol_set_capacity_impl,
};
int
zvol_init(void)
{
int error;
int threads = MIN(MAX(zvol_threads, 1), 1024);
error = register_blkdev(zvol_major, ZVOL_DRIVER);
if (error) {
printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
return (error);
}
zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
if (zvol_taskq == NULL) {
unregister_blkdev(zvol_major, ZVOL_DRIVER);
return (-ENOMEM);
}
zvol_init_impl();
ida_init(&zvol_ida);
zvol_register_ops(&zvol_linux_ops);
return (0);
}
void
zvol_fini(void)
{
zvol_fini_impl();
unregister_blkdev(zvol_major, ZVOL_DRIVER);
taskq_destroy(zvol_taskq);
ida_destroy(&zvol_ida);
}
/* BEGIN CSTYLED */
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");
module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
/* END CSTYLED */
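/*
 * Usage note (illustrative, not part of the source): parameters registered
 * above with mode 0644 (e.g. zvol_request_sync, zvol_prefetch_bytes) can be
 * changed at runtime via /sys/module/zfs/parameters/<name>, while 0444
 * parameters (e.g. zvol_major, zvol_threads) are read-only at runtime and
 * must be set at module load time, for instance with
 * "modprobe zfs zvol_threads=64".
 */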
diff --git a/sys/contrib/openzfs/module/zcommon/zfs_prop.c b/sys/contrib/openzfs/module/zcommon/zfs_prop.c
index 402d749c1aeb..5f88bd02089d 100644
--- a/sys/contrib/openzfs/module/zcommon/zfs_prop.c
+++ b/sys/contrib/openzfs/module/zcommon/zfs_prop.c
@@ -1,1052 +1,1056 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright 2016, Joyent, Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/u8_textprep.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_znode.h>
#include <sys/dsl_crypt.h>
#include "zfs_prop.h"
#include "zfs_deleg.h"
#include "zfs_fletcher.h"
#if !defined(_KERNEL)
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#endif
static zprop_desc_t zfs_prop_table[ZFS_NUM_PROPS];
/* Note this is indexed by zfs_userquota_prop_t, keep the order the same */
const char *zfs_userquota_prop_prefixes[] = {
"userused@",
"userquota@",
"groupused@",
"groupquota@",
"userobjused@",
"userobjquota@",
"groupobjused@",
"groupobjquota@",
"projectused@",
"projectquota@",
"projectobjused@",
"projectobjquota@"
};
zprop_desc_t *
zfs_prop_get_table(void)
{
return (zfs_prop_table);
}
void
zfs_prop_init(void)
{
static zprop_index_t checksum_table[] = {
{ "on", ZIO_CHECKSUM_ON },
{ "off", ZIO_CHECKSUM_OFF },
{ "fletcher2", ZIO_CHECKSUM_FLETCHER_2 },
{ "fletcher4", ZIO_CHECKSUM_FLETCHER_4 },
{ "sha256", ZIO_CHECKSUM_SHA256 },
{ "noparity", ZIO_CHECKSUM_NOPARITY },
{ "sha512", ZIO_CHECKSUM_SHA512 },
{ "skein", ZIO_CHECKSUM_SKEIN },
#if !defined(__FreeBSD__)
{ "edonr", ZIO_CHECKSUM_EDONR },
#endif
{ NULL }
};
static zprop_index_t dedup_table[] = {
{ "on", ZIO_CHECKSUM_ON },
{ "off", ZIO_CHECKSUM_OFF },
{ "verify", ZIO_CHECKSUM_ON | ZIO_CHECKSUM_VERIFY },
{ "sha256", ZIO_CHECKSUM_SHA256 },
{ "sha256,verify",
ZIO_CHECKSUM_SHA256 | ZIO_CHECKSUM_VERIFY },
{ "sha512", ZIO_CHECKSUM_SHA512 },
{ "sha512,verify",
ZIO_CHECKSUM_SHA512 | ZIO_CHECKSUM_VERIFY },
{ "skein", ZIO_CHECKSUM_SKEIN },
{ "skein,verify",
ZIO_CHECKSUM_SKEIN | ZIO_CHECKSUM_VERIFY },
#if !defined(__FreeBSD__)
{ "edonr,verify",
ZIO_CHECKSUM_EDONR | ZIO_CHECKSUM_VERIFY },
#endif
{ NULL }
};
static zprop_index_t compress_table[] = {
{ "on", ZIO_COMPRESS_ON },
{ "off", ZIO_COMPRESS_OFF },
{ "lzjb", ZIO_COMPRESS_LZJB },
{ "gzip", ZIO_COMPRESS_GZIP_6 }, /* gzip default */
{ "gzip-1", ZIO_COMPRESS_GZIP_1 },
{ "gzip-2", ZIO_COMPRESS_GZIP_2 },
{ "gzip-3", ZIO_COMPRESS_GZIP_3 },
{ "gzip-4", ZIO_COMPRESS_GZIP_4 },
{ "gzip-5", ZIO_COMPRESS_GZIP_5 },
{ "gzip-6", ZIO_COMPRESS_GZIP_6 },
{ "gzip-7", ZIO_COMPRESS_GZIP_7 },
{ "gzip-8", ZIO_COMPRESS_GZIP_8 },
{ "gzip-9", ZIO_COMPRESS_GZIP_9 },
{ "zle", ZIO_COMPRESS_ZLE },
{ "lz4", ZIO_COMPRESS_LZ4 },
{ "zstd", ZIO_COMPRESS_ZSTD },
{ "zstd-fast",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_DEFAULT) },
/*
* ZSTD 1-19 are synthetic. We store the compression level in a
* separate hidden property to avoid wasting a large amount of
* space in the ZIO_COMPRESS enum.
*
* The compression level is also stored within the header of the
* compressed block since we may need it for later recompression
* to avoid checksum errors (L2ARC).
*
* Note that the level here is defined as a bit-shifted mask on
* top of the method.
*/
{ "zstd-1", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_1) },
{ "zstd-2", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_2) },
{ "zstd-3", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_3) },
{ "zstd-4", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_4) },
{ "zstd-5", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_5) },
{ "zstd-6", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_6) },
{ "zstd-7", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_7) },
{ "zstd-8", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_8) },
{ "zstd-9", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_9) },
{ "zstd-10", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_10) },
{ "zstd-11", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_11) },
{ "zstd-12", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_12) },
{ "zstd-13", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_13) },
{ "zstd-14", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_14) },
{ "zstd-15", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_15) },
{ "zstd-16", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_16) },
{ "zstd-17", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_17) },
{ "zstd-18", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_18) },
{ "zstd-19", ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_19) },
/*
* The ZSTD-Fast levels are also synthetic.
*/
{ "zstd-fast-1",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_1) },
{ "zstd-fast-2",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_2) },
{ "zstd-fast-3",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_3) },
{ "zstd-fast-4",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_4) },
{ "zstd-fast-5",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_5) },
{ "zstd-fast-6",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_6) },
{ "zstd-fast-7",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_7) },
{ "zstd-fast-8",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_8) },
{ "zstd-fast-9",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_9) },
{ "zstd-fast-10",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_10) },
{ "zstd-fast-20",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_20) },
{ "zstd-fast-30",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_30) },
{ "zstd-fast-40",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_40) },
{ "zstd-fast-50",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_50) },
{ "zstd-fast-60",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_60) },
{ "zstd-fast-70",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_70) },
{ "zstd-fast-80",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_80) },
{ "zstd-fast-90",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_90) },
{ "zstd-fast-100",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_100) },
{ "zstd-fast-500",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_500) },
{ "zstd-fast-1000",
ZIO_COMPLEVEL_ZSTD(ZIO_ZSTD_LEVEL_FAST_1000) },
{ NULL }
};
static zprop_index_t crypto_table[] = {
{ "on", ZIO_CRYPT_ON },
{ "off", ZIO_CRYPT_OFF },
{ "aes-128-ccm", ZIO_CRYPT_AES_128_CCM },
{ "aes-192-ccm", ZIO_CRYPT_AES_192_CCM },
{ "aes-256-ccm", ZIO_CRYPT_AES_256_CCM },
{ "aes-128-gcm", ZIO_CRYPT_AES_128_GCM },
{ "aes-192-gcm", ZIO_CRYPT_AES_192_GCM },
{ "aes-256-gcm", ZIO_CRYPT_AES_256_GCM },
{ NULL }
};
static zprop_index_t keyformat_table[] = {
{ "none", ZFS_KEYFORMAT_NONE },
{ "raw", ZFS_KEYFORMAT_RAW },
{ "hex", ZFS_KEYFORMAT_HEX },
{ "passphrase", ZFS_KEYFORMAT_PASSPHRASE },
{ NULL }
};
static zprop_index_t snapdir_table[] = {
{ "hidden", ZFS_SNAPDIR_HIDDEN },
{ "visible", ZFS_SNAPDIR_VISIBLE },
{ NULL }
};
static zprop_index_t snapdev_table[] = {
{ "hidden", ZFS_SNAPDEV_HIDDEN },
{ "visible", ZFS_SNAPDEV_VISIBLE },
{ NULL }
};
static zprop_index_t acl_mode_table[] = {
{ "discard", ZFS_ACL_DISCARD },
{ "groupmask", ZFS_ACL_GROUPMASK },
{ "passthrough", ZFS_ACL_PASSTHROUGH },
{ "restricted", ZFS_ACL_RESTRICTED },
{ NULL }
};
static zprop_index_t acltype_table[] = {
{ "off", ZFS_ACLTYPE_OFF },
{ "posix", ZFS_ACLTYPE_POSIX },
{ "nfsv4", ZFS_ACLTYPE_NFSV4 },
{ "disabled", ZFS_ACLTYPE_OFF }, /* bkwrd compatibility */
{ "noacl", ZFS_ACLTYPE_OFF }, /* bkwrd compatibility */
{ "posixacl", ZFS_ACLTYPE_POSIX }, /* bkwrd compatibility */
{ NULL }
};
static zprop_index_t acl_inherit_table[] = {
{ "discard", ZFS_ACL_DISCARD },
{ "noallow", ZFS_ACL_NOALLOW },
{ "restricted", ZFS_ACL_RESTRICTED },
{ "passthrough", ZFS_ACL_PASSTHROUGH },
{ "secure", ZFS_ACL_RESTRICTED }, /* bkwrd compatibility */
{ "passthrough-x", ZFS_ACL_PASSTHROUGH_X },
{ NULL }
};
static zprop_index_t case_table[] = {
{ "sensitive", ZFS_CASE_SENSITIVE },
{ "insensitive", ZFS_CASE_INSENSITIVE },
{ "mixed", ZFS_CASE_MIXED },
{ NULL }
};
static zprop_index_t copies_table[] = {
{ "1", 1 },
{ "2", 2 },
{ "3", 3 },
{ NULL }
};
/*
* Use the unique flags we have to send to u8_strcmp() and/or
* u8_textprep() to represent the various normalization property
* values.
*/
static zprop_index_t normalize_table[] = {
{ "none", 0 },
{ "formD", U8_TEXTPREP_NFD },
{ "formKC", U8_TEXTPREP_NFKC },
{ "formC", U8_TEXTPREP_NFC },
{ "formKD", U8_TEXTPREP_NFKD },
{ NULL }
};
static zprop_index_t version_table[] = {
{ "1", 1 },
{ "2", 2 },
{ "3", 3 },
{ "4", 4 },
{ "5", 5 },
{ "current", ZPL_VERSION },
{ NULL }
};
static zprop_index_t boolean_table[] = {
{ "off", 0 },
{ "on", 1 },
{ NULL }
};
static zprop_index_t keystatus_table[] = {
{ "none", ZFS_KEYSTATUS_NONE},
{ "unavailable", ZFS_KEYSTATUS_UNAVAILABLE},
{ "available", ZFS_KEYSTATUS_AVAILABLE},
{ NULL }
};
static zprop_index_t logbias_table[] = {
{ "latency", ZFS_LOGBIAS_LATENCY },
{ "throughput", ZFS_LOGBIAS_THROUGHPUT },
{ NULL }
};
static zprop_index_t canmount_table[] = {
{ "off", ZFS_CANMOUNT_OFF },
{ "on", ZFS_CANMOUNT_ON },
{ "noauto", ZFS_CANMOUNT_NOAUTO },
{ NULL }
};
static zprop_index_t cache_table[] = {
{ "none", ZFS_CACHE_NONE },
{ "metadata", ZFS_CACHE_METADATA },
{ "all", ZFS_CACHE_ALL },
{ NULL }
};
static zprop_index_t sync_table[] = {
{ "standard", ZFS_SYNC_STANDARD },
{ "always", ZFS_SYNC_ALWAYS },
{ "disabled", ZFS_SYNC_DISABLED },
{ NULL }
};
static zprop_index_t xattr_table[] = {
{ "off", ZFS_XATTR_OFF },
{ "on", ZFS_XATTR_DIR },
{ "sa", ZFS_XATTR_SA },
{ "dir", ZFS_XATTR_DIR },
{ NULL }
};
static zprop_index_t dnsize_table[] = {
{ "legacy", ZFS_DNSIZE_LEGACY },
{ "auto", ZFS_DNSIZE_AUTO },
{ "1k", ZFS_DNSIZE_1K },
{ "2k", ZFS_DNSIZE_2K },
{ "4k", ZFS_DNSIZE_4K },
{ "8k", ZFS_DNSIZE_8K },
{ "16k", ZFS_DNSIZE_16K },
{ NULL }
};
static zprop_index_t redundant_metadata_table[] = {
{ "all", ZFS_REDUNDANT_METADATA_ALL },
{ "most", ZFS_REDUNDANT_METADATA_MOST },
{ NULL }
};
static zprop_index_t volmode_table[] = {
{ "default", ZFS_VOLMODE_DEFAULT },
{ "full", ZFS_VOLMODE_GEOM },
{ "geom", ZFS_VOLMODE_GEOM },
{ "dev", ZFS_VOLMODE_DEV },
{ "none", ZFS_VOLMODE_NONE },
{ NULL }
};
/* inherit index properties */
zprop_register_index(ZFS_PROP_REDUNDANT_METADATA, "redundant_metadata",
ZFS_REDUNDANT_METADATA_ALL,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"all | most", "REDUND_MD",
redundant_metadata_table);
zprop_register_index(ZFS_PROP_SYNC, "sync", ZFS_SYNC_STANDARD,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"standard | always | disabled", "SYNC",
sync_table);
zprop_register_index(ZFS_PROP_CHECKSUM, "checksum",
ZIO_CHECKSUM_DEFAULT, PROP_INHERIT, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME,
#if !defined(__FreeBSD__)
"on | off | fletcher2 | fletcher4 | sha256 | sha512 | skein"
" | edonr",
#else
"on | off | fletcher2 | fletcher4 | sha256 | sha512 | skein",
#endif
"CHECKSUM", checksum_table);
zprop_register_index(ZFS_PROP_DEDUP, "dedup", ZIO_CHECKSUM_OFF,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"on | off | verify | sha256[,verify] | sha512[,verify] | "
#if !defined(__FreeBSD__)
"skein[,verify] | edonr,verify",
#else
"skein[,verify]",
#endif
"DEDUP", dedup_table);
zprop_register_index(ZFS_PROP_COMPRESSION, "compression",
ZIO_COMPRESS_DEFAULT, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"on | off | lzjb | gzip | gzip-[1-9] | zle | lz4 | "
"zstd | zstd-[1-19] | "
"zstd-fast | zstd-fast-[1-10,20,30,40,50,60,70,80,90,100,500,1000]",
"COMPRESS", compress_table);
zprop_register_index(ZFS_PROP_SNAPDIR, "snapdir", ZFS_SNAPDIR_HIDDEN,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"hidden | visible", "SNAPDIR", snapdir_table);
zprop_register_index(ZFS_PROP_SNAPDEV, "snapdev", ZFS_SNAPDEV_HIDDEN,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"hidden | visible", "SNAPDEV", snapdev_table);
zprop_register_index(ZFS_PROP_ACLMODE, "aclmode", ZFS_ACL_DISCARD,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"discard | groupmask | passthrough | restricted", "ACLMODE",
acl_mode_table);
zprop_register_index(ZFS_PROP_ACLTYPE, "acltype",
#ifdef __linux__
/* Linux doesn't natively support ZFS's NFSv4-style ACLs. */
ZFS_ACLTYPE_OFF,
#else
ZFS_ACLTYPE_NFSV4,
#endif
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"off | nfsv4 | posix", "ACLTYPE", acltype_table);
zprop_register_index(ZFS_PROP_ACLINHERIT, "aclinherit",
ZFS_ACL_RESTRICTED, PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"discard | noallow | restricted | passthrough | passthrough-x",
"ACLINHERIT", acl_inherit_table);
zprop_register_index(ZFS_PROP_COPIES, "copies", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"1 | 2 | 3", "COPIES", copies_table);
zprop_register_index(ZFS_PROP_PRIMARYCACHE, "primarycache",
ZFS_CACHE_ALL, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME,
"all | none | metadata", "PRIMARYCACHE", cache_table);
zprop_register_index(ZFS_PROP_SECONDARYCACHE, "secondarycache",
ZFS_CACHE_ALL, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME,
"all | none | metadata", "SECONDARYCACHE", cache_table);
zprop_register_index(ZFS_PROP_LOGBIAS, "logbias", ZFS_LOGBIAS_LATENCY,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"latency | throughput", "LOGBIAS", logbias_table);
zprop_register_index(ZFS_PROP_XATTR, "xattr", ZFS_XATTR_DIR,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"on | off | dir | sa", "XATTR", xattr_table);
zprop_register_index(ZFS_PROP_DNODESIZE, "dnodesize",
ZFS_DNSIZE_LEGACY, PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"legacy | auto | 1k | 2k | 4k | 8k | 16k", "DNSIZE", dnsize_table);
zprop_register_index(ZFS_PROP_VOLMODE, "volmode",
ZFS_VOLMODE_DEFAULT, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"default | full | geom | dev | none", "VOLMODE", volmode_table);
/* inherit index (boolean) properties */
zprop_register_index(ZFS_PROP_ATIME, "atime", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "ATIME", boolean_table);
zprop_register_index(ZFS_PROP_RELATIME, "relatime", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "RELATIME", boolean_table);
zprop_register_index(ZFS_PROP_DEVICES, "devices", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "DEVICES",
boolean_table);
zprop_register_index(ZFS_PROP_EXEC, "exec", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "EXEC",
boolean_table);
zprop_register_index(ZFS_PROP_SETUID, "setuid", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "SETUID",
boolean_table);
zprop_register_index(ZFS_PROP_READONLY, "readonly", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "on | off", "RDONLY",
boolean_table);
#ifdef __FreeBSD__
zprop_register_index(ZFS_PROP_ZONED, "jailed", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "JAILED", boolean_table);
#else
zprop_register_index(ZFS_PROP_ZONED, "zoned", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "ZONED", boolean_table);
#endif
zprop_register_index(ZFS_PROP_VSCAN, "vscan", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "VSCAN", boolean_table);
zprop_register_index(ZFS_PROP_NBMAND, "nbmand", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "NBMAND",
boolean_table);
zprop_register_index(ZFS_PROP_OVERLAY, "overlay", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "OVERLAY", boolean_table);
/* default index properties */
zprop_register_index(ZFS_PROP_VERSION, "version", 0, PROP_DEFAULT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"1 | 2 | 3 | 4 | 5 | current", "VERSION", version_table);
zprop_register_index(ZFS_PROP_CANMOUNT, "canmount", ZFS_CANMOUNT_ON,
PROP_DEFAULT, ZFS_TYPE_FILESYSTEM, "on | off | noauto",
"CANMOUNT", canmount_table);
/* readonly index properties */
zprop_register_index(ZFS_PROP_MOUNTED, "mounted", 0, PROP_READONLY,
ZFS_TYPE_FILESYSTEM, "yes | no", "MOUNTED", boolean_table);
zprop_register_index(ZFS_PROP_DEFER_DESTROY, "defer_destroy", 0,
PROP_READONLY, ZFS_TYPE_SNAPSHOT, "yes | no", "DEFER_DESTROY",
boolean_table);
zprop_register_index(ZFS_PROP_KEYSTATUS, "keystatus",
ZFS_KEYSTATUS_NONE, PROP_READONLY, ZFS_TYPE_DATASET,
"none | unavailable | available",
"KEYSTATUS", keystatus_table);
/* set once index properties */
zprop_register_index(ZFS_PROP_NORMALIZE, "normalization", 0,
PROP_ONETIME, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"none | formC | formD | formKC | formKD", "NORMALIZATION",
normalize_table);
zprop_register_index(ZFS_PROP_CASE, "casesensitivity",
ZFS_CASE_SENSITIVE, PROP_ONETIME, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_SNAPSHOT,
"sensitive | insensitive | mixed", "CASE", case_table);
zprop_register_index(ZFS_PROP_KEYFORMAT, "keyformat",
ZFS_KEYFORMAT_NONE, PROP_ONETIME_DEFAULT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"none | raw | hex | passphrase", "KEYFORMAT", keyformat_table);
zprop_register_index(ZFS_PROP_ENCRYPTION, "encryption",
ZIO_CRYPT_DEFAULT, PROP_ONETIME, ZFS_TYPE_DATASET,
"on | off | aes-128-ccm | aes-192-ccm | aes-256-ccm | "
"aes-128-gcm | aes-192-gcm | aes-256-gcm", "ENCRYPTION",
crypto_table);
/* set once index (boolean) properties */
zprop_register_index(ZFS_PROP_UTF8ONLY, "utf8only", 0, PROP_ONETIME,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"on | off", "UTF8ONLY", boolean_table);
/* string properties */
zprop_register_string(ZFS_PROP_ORIGIN, "origin", NULL, PROP_READONLY,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<snapshot>", "ORIGIN");
zprop_register_string(ZFS_PROP_CLONES, "clones", NULL, PROP_READONLY,
ZFS_TYPE_SNAPSHOT, "<dataset>[,...]", "CLONES");
zprop_register_string(ZFS_PROP_MOUNTPOINT, "mountpoint", "/",
PROP_INHERIT, ZFS_TYPE_FILESYSTEM, "<path> | legacy | none",
"MOUNTPOINT");
zprop_register_string(ZFS_PROP_SHARENFS, "sharenfs", "off",
PROP_INHERIT, ZFS_TYPE_FILESYSTEM, "on | off | NFS share options",
"SHARENFS");
zprop_register_string(ZFS_PROP_TYPE, "type", NULL, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK,
"filesystem | volume | snapshot | bookmark", "TYPE");
zprop_register_string(ZFS_PROP_SHARESMB, "sharesmb", "off",
PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"on | off | SMB share options", "SHARESMB");
zprop_register_string(ZFS_PROP_MLSLABEL, "mlslabel",
ZFS_MLSLABEL_DEFAULT, PROP_INHERIT, ZFS_TYPE_DATASET,
"<sensitivity label>", "MLSLABEL");
zprop_register_string(ZFS_PROP_SELINUX_CONTEXT, "context",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux context>",
"CONTEXT");
zprop_register_string(ZFS_PROP_SELINUX_FSCONTEXT, "fscontext",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux fscontext>",
"FSCONTEXT");
zprop_register_string(ZFS_PROP_SELINUX_DEFCONTEXT, "defcontext",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux defcontext>",
"DEFCONTEXT");
zprop_register_string(ZFS_PROP_SELINUX_ROOTCONTEXT, "rootcontext",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux rootcontext>",
"ROOTCONTEXT");
zprop_register_string(ZFS_PROP_RECEIVE_RESUME_TOKEN,
"receive_resume_token",
NULL, PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<string token>", "RESUMETOK");
zprop_register_string(ZFS_PROP_ENCRYPTION_ROOT, "encryptionroot", NULL,
PROP_READONLY, ZFS_TYPE_DATASET, "<filesystem | volume>",
"ENCROOT");
zprop_register_string(ZFS_PROP_KEYLOCATION, "keylocation",
"none", PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
- "prompt | <file URI>", "KEYLOCATION");
+ "prompt | <file URI> | <https URL> | <http URL>", "KEYLOCATION");
zprop_register_string(ZFS_PROP_REDACT_SNAPS,
"redact_snaps", NULL, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<snapshot>[,...]",
"RSNAPS");
/* readonly number properties */
zprop_register_number(ZFS_PROP_USED, "used", 0, PROP_READONLY,
ZFS_TYPE_DATASET, "<size>", "USED");
zprop_register_number(ZFS_PROP_AVAILABLE, "available", 0, PROP_READONLY,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>", "AVAIL");
zprop_register_number(ZFS_PROP_REFERENCED, "referenced", 0,
PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<size>",
"REFER");
zprop_register_number(ZFS_PROP_COMPRESSRATIO, "compressratio", 0,
PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK,
"<1.00x or higher if compressed>", "RATIO");
zprop_register_number(ZFS_PROP_REFRATIO, "refcompressratio", 0,
PROP_READONLY, ZFS_TYPE_DATASET,
"<1.00x or higher if compressed>", "REFRATIO");
zprop_register_number(ZFS_PROP_VOLBLOCKSIZE, "volblocksize",
ZVOL_DEFAULT_BLOCKSIZE, PROP_ONETIME,
ZFS_TYPE_VOLUME, "512 to 128k, power of 2", "VOLBLOCK");
zprop_register_number(ZFS_PROP_USEDSNAP, "usedbysnapshots", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"USEDSNAP");
zprop_register_number(ZFS_PROP_USEDDS, "usedbydataset", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"USEDDS");
zprop_register_number(ZFS_PROP_USEDCHILD, "usedbychildren", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"USEDCHILD");
zprop_register_number(ZFS_PROP_USEDREFRESERV, "usedbyrefreservation", 0,
PROP_READONLY,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>", "USEDREFRESERV");
zprop_register_number(ZFS_PROP_USERREFS, "userrefs", 0, PROP_READONLY,
ZFS_TYPE_SNAPSHOT, "<count>", "USERREFS");
zprop_register_number(ZFS_PROP_WRITTEN, "written", 0, PROP_READONLY,
ZFS_TYPE_DATASET, "<size>", "WRITTEN");
zprop_register_number(ZFS_PROP_LOGICALUSED, "logicalused", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"LUSED");
zprop_register_number(ZFS_PROP_LOGICALREFERENCED, "logicalreferenced",
0, PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<size>",
"LREFER");
zprop_register_number(ZFS_PROP_FILESYSTEM_COUNT, "filesystem_count",
UINT64_MAX, PROP_READONLY, ZFS_TYPE_FILESYSTEM,
"<count>", "FSCOUNT");
zprop_register_number(ZFS_PROP_SNAPSHOT_COUNT, "snapshot_count",
UINT64_MAX, PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<count>", "SSCOUNT");
zprop_register_number(ZFS_PROP_GUID, "guid", 0, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<uint64>", "GUID");
zprop_register_number(ZFS_PROP_CREATETXG, "createtxg", 0, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<uint64>", "CREATETXG");
zprop_register_number(ZFS_PROP_PBKDF2_ITERS, "pbkdf2iters",
0, PROP_ONETIME_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<iters>", "PBKDF2ITERS");
zprop_register_number(ZFS_PROP_OBJSETID, "objsetid", 0,
PROP_READONLY, ZFS_TYPE_DATASET, "<uint64>", "OBJSETID");
/* default number properties */
zprop_register_number(ZFS_PROP_QUOTA, "quota", 0, PROP_DEFAULT,
ZFS_TYPE_FILESYSTEM, "<size> | none", "QUOTA");
zprop_register_number(ZFS_PROP_RESERVATION, "reservation", 0,
PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<size> | none", "RESERV");
zprop_register_number(ZFS_PROP_VOLSIZE, "volsize", 0, PROP_DEFAULT,
ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME, "<size>", "VOLSIZE");
zprop_register_number(ZFS_PROP_REFQUOTA, "refquota", 0, PROP_DEFAULT,
ZFS_TYPE_FILESYSTEM, "<size> | none", "REFQUOTA");
zprop_register_number(ZFS_PROP_REFRESERVATION, "refreservation", 0,
PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<size> | none", "REFRESERV");
zprop_register_number(ZFS_PROP_FILESYSTEM_LIMIT, "filesystem_limit",
UINT64_MAX, PROP_DEFAULT, ZFS_TYPE_FILESYSTEM,
"<count> | none", "FSLIMIT");
zprop_register_number(ZFS_PROP_SNAPSHOT_LIMIT, "snapshot_limit",
UINT64_MAX, PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<count> | none", "SSLIMIT");
/* inherit number properties */
zprop_register_number(ZFS_PROP_RECORDSIZE, "recordsize",
SPA_OLD_MAXBLOCKSIZE, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "512 to 1M, power of 2", "RECSIZE");
zprop_register_number(ZFS_PROP_SPECIAL_SMALL_BLOCKS,
"special_small_blocks", 0, PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"zero or 512 to 1M, power of 2", "SPECIAL_SMALL_BLOCKS");
/* hidden properties */
zprop_register_hidden(ZFS_PROP_NUMCLONES, "numclones", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_SNAPSHOT, "NUMCLONES");
zprop_register_hidden(ZFS_PROP_NAME, "name", PROP_TYPE_STRING,
PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "NAME");
zprop_register_hidden(ZFS_PROP_ISCSIOPTIONS, "iscsioptions",
PROP_TYPE_STRING, PROP_INHERIT, ZFS_TYPE_VOLUME, "ISCSIOPTIONS");
zprop_register_hidden(ZFS_PROP_STMF_SHAREINFO, "stmf_sbd_lu",
PROP_TYPE_STRING, PROP_INHERIT, ZFS_TYPE_VOLUME,
"STMF_SBD_LU");
zprop_register_hidden(ZFS_PROP_USERACCOUNTING, "useraccounting",
PROP_TYPE_NUMBER, PROP_READONLY, ZFS_TYPE_DATASET,
"USERACCOUNTING");
zprop_register_hidden(ZFS_PROP_UNIQUE, "unique", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "UNIQUE");
zprop_register_hidden(ZFS_PROP_INCONSISTENT, "inconsistent",
PROP_TYPE_NUMBER, PROP_READONLY, ZFS_TYPE_DATASET, "INCONSISTENT");
zprop_register_hidden(ZFS_PROP_IVSET_GUID, "ivsetguid",
PROP_TYPE_NUMBER, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "IVSETGUID");
zprop_register_hidden(ZFS_PROP_PREV_SNAP, "prevsnap", PROP_TYPE_STRING,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "PREVSNAP");
zprop_register_hidden(ZFS_PROP_PBKDF2_SALT, "pbkdf2salt",
PROP_TYPE_NUMBER, PROP_ONETIME_DEFAULT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "PBKDF2SALT");
zprop_register_hidden(ZFS_PROP_KEY_GUID, "keyguid", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "KEYGUID");
zprop_register_hidden(ZFS_PROP_REDACTED, "redacted", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "REDACTED");
/*
* Properties that are obsolete and not used. These are retained so
* that we don't have to change the values of the zfs_prop_t enum, or
* have NULL pointers in the zfs_prop_table[].
*/
zprop_register_hidden(ZFS_PROP_REMAPTXG, "remaptxg", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "REMAPTXG");
/* oddball properties */
zprop_register_impl(ZFS_PROP_CREATION, "creation", PROP_TYPE_NUMBER, 0,
NULL, PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK,
"<date>", "CREATION", B_FALSE, B_TRUE, NULL);
}
boolean_t
zfs_prop_delegatable(zfs_prop_t prop)
{
zprop_desc_t *pd = &zfs_prop_table[prop];
/* The mlslabel property is never delegatable. */
if (prop == ZFS_PROP_MLSLABEL)
return (B_FALSE);
return (pd->pd_attr != PROP_READONLY);
}
/*
* Given a zfs dataset property name, returns the corresponding property ID.
*/
zfs_prop_t
zfs_name_to_prop(const char *propname)
{
return (zprop_name_to_prop(propname, ZFS_TYPE_DATASET));
}
/*
* For user property names, we allow all lowercase alphanumeric characters, plus
* a few useful punctuation characters.
*/
static int
valid_char(char c)
{
return ((c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') ||
c == '-' || c == '_' || c == '.' || c == ':');
}
/*
* Returns true if this is a valid user-defined property (one with a ':').
*/
boolean_t
zfs_prop_user(const char *name)
{
int i;
char c;
boolean_t foundsep = B_FALSE;
for (i = 0; i < strlen(name); i++) {
c = name[i];
if (!valid_char(c))
return (B_FALSE);
if (c == ':')
foundsep = B_TRUE;
}
if (!foundsep)
return (B_FALSE);
return (B_TRUE);
}
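/*
 * Examples (illustrative): "com.example:backup" and "org.site:note-1" are
 * valid user properties (lowercase characters only, with a ':'), while
 * "Com.Example:backup" (uppercase) and "compression" (no ':') are not.
 */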
/*
* Returns true if this is a valid userspace-type property (one with a '@').
* Note that after the @, any character is valid (e.g., another @, for SID
* user@domain).
*/
boolean_t
zfs_prop_userquota(const char *name)
{
zfs_userquota_prop_t prop;
for (prop = 0; prop < ZFS_NUM_USERQUOTA_PROPS; prop++) {
if (strncmp(name, zfs_userquota_prop_prefixes[prop],
strlen(zfs_userquota_prop_prefixes[prop])) == 0) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Returns true if this is a valid written@ property.
* Note that after the @, any character is valid (e.g., another @, for
* written@pool/fs@origin).
*/
boolean_t
zfs_prop_written(const char *name)
{
static const char *prop_prefix = "written@";
static const char *book_prefix = "written#";
return (strncmp(name, prop_prefix, strlen(prop_prefix)) == 0 ||
strncmp(name, book_prefix, strlen(book_prefix)) == 0);
}
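/*
 * Examples (illustrative): "written@monday" and "written#mybookmark" both
 * match, and anything after the '@' or '#' is accepted verbatim, e.g.
 * "written@pool/fs@origin".
 */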
/*
* Tables of index types, plus functions to convert between the user view
* (strings) and internal representation (uint64_t).
*/
int
zfs_prop_string_to_index(zfs_prop_t prop, const char *string, uint64_t *index)
{
return (zprop_string_to_index(prop, string, index, ZFS_TYPE_DATASET));
}
int
zfs_prop_index_to_string(zfs_prop_t prop, uint64_t index, const char **string)
{
return (zprop_index_to_string(prop, index, string, ZFS_TYPE_DATASET));
}
uint64_t
zfs_prop_random_value(zfs_prop_t prop, uint64_t seed)
{
return (zprop_random_value(prop, seed, ZFS_TYPE_DATASET));
}
/*
* Returns TRUE if the property applies to any of the given dataset types.
*/
boolean_t
zfs_prop_valid_for_type(int prop, zfs_type_t types, boolean_t headcheck)
{
return (zprop_valid_for_type(prop, types, headcheck));
}
zprop_type_t
zfs_prop_get_type(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_proptype);
}
/*
* Returns TRUE if the property is readonly.
*/
boolean_t
zfs_prop_readonly(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_attr == PROP_READONLY ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME_DEFAULT);
}
/*
* Returns TRUE if the property is visible (not hidden).
*/
boolean_t
zfs_prop_visible(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_visible &&
zfs_prop_table[prop].pd_zfs_mod_supported);
}
/*
* Returns TRUE if the property is only allowed to be set once.
*/
boolean_t
zfs_prop_setonce(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_attr == PROP_ONETIME ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME_DEFAULT);
}
const char *
zfs_prop_default_string(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_strdefault);
}
uint64_t
zfs_prop_default_numeric(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_numdefault);
}
/*
* Given a dataset property ID, returns the corresponding name.
* The zfs dataset property ID is assumed to be valid.
*/
const char *
zfs_prop_to_name(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_name);
}
/*
* Returns TRUE if the property is inheritable.
*/
boolean_t
zfs_prop_inheritable(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_attr == PROP_INHERIT ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME);
}
/*
* Returns TRUE if property is one of the encryption properties that requires
* a loaded encryption key to modify.
*/
boolean_t
zfs_prop_encryption_key_param(zfs_prop_t prop)
{
/*
* keylocation does not count as an encryption property. It can be
* changed at will without needing the master keys.
*/
return (prop == ZFS_PROP_PBKDF2_SALT || prop == ZFS_PROP_PBKDF2_ITERS ||
prop == ZFS_PROP_KEYFORMAT);
}
/*
* Helper function used by both kernelspace and userspace to check the
* keylocation property. If encrypted is set, the keylocation must be valid
* for an encrypted dataset.
*/
boolean_t
zfs_prop_valid_keylocation(const char *str, boolean_t encrypted)
{
if (strcmp("none", str) == 0)
return (!encrypted);
else if (strcmp("prompt", str) == 0)
return (B_TRUE);
else if (strlen(str) > 8 && strncmp("file:///", str, 8) == 0)
return (B_TRUE);
+ else if (strlen(str) > 8 && strncmp("https://", str, 8) == 0)
+ return (B_TRUE);
+ else if (strlen(str) > 7 && strncmp("http://", str, 7) == 0)
+ return (B_TRUE);
return (B_FALSE);
}
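/*
 * Examples (illustrative): "prompt", "file:///etc/zfs/keys/mykey",
 * "https://example.com/mykey" and "http://example.com/mykey" all pass this
 * check, while "none" is accepted only for unencrypted datasets and an
 * unsupported scheme such as "ftp://example.com/mykey" is always rejected.
 */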
#ifndef _KERNEL
#include <libzfs.h>
/*
* Returns a string describing the set of acceptable values for the given
* zfs property, or NULL if it cannot be set.
*/
const char *
zfs_prop_values(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_values);
}
/*
* Returns TRUE if this property is a string type. Note that index types
* (compression, checksum) are treated as strings in userland, even though they
* are stored numerically on disk.
*/
int
zfs_prop_is_string(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_proptype == PROP_TYPE_STRING ||
zfs_prop_table[prop].pd_proptype == PROP_TYPE_INDEX);
}
/*
* Returns the column header for the given property. Used only in
* 'zfs list -o', but centralized here with the other property information.
*/
const char *
zfs_prop_column_name(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_colname);
}
/*
* Returns whether the given property should be displayed right-justified for
* 'zfs list'.
*/
boolean_t
zfs_prop_align_right(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_rightalign);
}
#endif
#if defined(_KERNEL)
#include <sys/simd.h>
-#if defined(HAVE_KERNEL_FPU_INTERNAL)
+#if defined(HAVE_KERNEL_FPU_INTERNAL) || defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
union fpregs_state **zfs_kfpu_fpregs;
EXPORT_SYMBOL(zfs_kfpu_fpregs);
-#endif /* HAVE_KERNEL_FPU_INTERNAL */
+#endif /* HAVE_KERNEL_FPU_INTERNAL || HAVE_KERNEL_FPU_XSAVE_INTERNAL */
static int __init
zcommon_init(void)
{
int error = kfpu_init();
if (error)
return (error);
fletcher_4_init();
return (0);
}
static void __exit
zcommon_fini(void)
{
fletcher_4_fini();
kfpu_fini();
}
module_init_early(zcommon_init);
module_exit(zcommon_fini);
#endif
ZFS_MODULE_DESCRIPTION("Generic ZFS support");
ZFS_MODULE_AUTHOR(ZFS_META_AUTHOR);
ZFS_MODULE_LICENSE(ZFS_META_LICENSE);
ZFS_MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
/* zfs dataset property functions */
EXPORT_SYMBOL(zfs_userquota_prop_prefixes);
EXPORT_SYMBOL(zfs_prop_init);
EXPORT_SYMBOL(zfs_prop_get_type);
EXPORT_SYMBOL(zfs_prop_get_table);
EXPORT_SYMBOL(zfs_prop_delegatable);
EXPORT_SYMBOL(zfs_prop_visible);
/* Dataset property functions shared between libzfs and kernel. */
EXPORT_SYMBOL(zfs_prop_default_string);
EXPORT_SYMBOL(zfs_prop_default_numeric);
EXPORT_SYMBOL(zfs_prop_readonly);
EXPORT_SYMBOL(zfs_prop_inheritable);
EXPORT_SYMBOL(zfs_prop_encryption_key_param);
EXPORT_SYMBOL(zfs_prop_valid_keylocation);
EXPORT_SYMBOL(zfs_prop_setonce);
EXPORT_SYMBOL(zfs_prop_to_name);
EXPORT_SYMBOL(zfs_name_to_prop);
EXPORT_SYMBOL(zfs_prop_user);
EXPORT_SYMBOL(zfs_prop_userquota);
EXPORT_SYMBOL(zfs_prop_index_to_string);
EXPORT_SYMBOL(zfs_prop_string_to_index);
EXPORT_SYMBOL(zfs_prop_valid_for_type);
EXPORT_SYMBOL(zfs_prop_written);
diff --git a/sys/contrib/openzfs/module/zfs/abd.c b/sys/contrib/openzfs/module/zfs/abd.c
index 03a7b1e033b3..42bf3e3036f9 100644
--- a/sys/contrib/openzfs/module/zfs/abd.c
+++ b/sys/contrib/openzfs/module/zfs/abd.c
@@ -1,1216 +1,1216 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* ARC buffer data (ABD).
*
* ABDs are an abstract data structure for the ARC which can use two
* different ways of storing the underlying data:
*
* (a) Linear buffer. In this case, all the data in the ABD is stored in one
* contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
*
* +-------------------+
* | ABD (linear) |
* | abd_flags = ... |
* | abd_size = ... | +--------------------------------+
* | abd_buf ------------->| raw buffer of size abd_size |
* +-------------------+ +--------------------------------+
* no abd_chunks
*
* (b) Scattered buffer. In this case, the data in the ABD is split into
* equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
* to the chunks recorded in an array at the end of the ABD structure.
*
* +-------------------+
* | ABD (scattered) |
* | abd_flags = ... |
* | abd_size = ... |
* | abd_offset = 0 | +-----------+
* | abd_chunks[0] ----------------------------->| chunk 0 |
* | abd_chunks[1] ---------------------+ +-----------+
* | ... | | +-----------+
* | abd_chunks[N-1] ---------+ +------->| chunk 1 |
* +-------------------+ | +-----------+
* | ...
* | +-----------+
* +----------------->| chunk N-1 |
* +-----------+
*
* In addition to directly allocating a linear or scattered ABD, it is also
* possible to create an ABD by requesting the "sub-ABD" starting at an offset
* within an existing ABD. In linear buffers this is simple (set abd_buf of
* the new ABD to the starting point within the original raw buffer), but
* scattered ABDs are a little more complex. The new ABD makes a copy of the
* relevant abd_chunks pointers (but not the underlying data). However, to
* provide arbitrary rather than only chunk-aligned starting offsets, it also
* tracks an abd_offset field which represents the starting point of the data
* within the first chunk in abd_chunks. For both linear and scattered ABDs,
* creating an offset ABD marks the original ABD as the offset's parent, and the
* original ABD's abd_children refcount is incremented. This data allows us to
* ensure the root ABD isn't deleted before its children.
*
* Most consumers should never need to know what type of ABD they're using --
* the ABD public API ensures that it's possible to transparently switch from
* using a linear ABD to a scattered one when doing so would be beneficial.
*
* If you need to use the data within an ABD directly, if you know it's linear
* (because you allocated it) you can use abd_to_buf() to access the underlying
* raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
* which will allocate a raw buffer if necessary. Use the abd_return_buf*
* functions to return any raw buffers that are no longer necessary when you're
* done using them.
*
* There are a variety of ABD APIs that implement basic buffer operations:
* compare, copy, read, write, and fill with zeroes. If you need a custom
* function which progressively accesses the whole ABD, use the abd_iterate_*
* functions.
*
* As an additional feature, linear and scatter ABD's can be stitched together
* by using the gang ABD type (abd_alloc_gang()). This allows for
* multiple ABDs to be viewed as a singular ABD.
*
* It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
* B_FALSE.
*/
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
void
abd_verify(abd_t *abd)
{
#ifdef ZFS_DEBUG
ASSERT3U(abd->abd_size, >, 0);
ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS | ABD_FLAG_ALLOCD));
IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd)) {
ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
} else if (abd_is_gang(abd)) {
uint_t child_sizes = 0;
for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT(list_link_active(&cabd->abd_gang_link));
child_sizes += cabd->abd_size;
abd_verify(cabd);
}
ASSERT3U(abd->abd_size, ==, child_sizes);
} else {
abd_verify_scatter(abd);
}
#endif
}
static void
abd_init_struct(abd_t *abd)
{
list_link_init(&abd->abd_gang_link);
mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
abd->abd_flags = 0;
#ifdef ZFS_DEBUG
zfs_refcount_create(&abd->abd_children);
abd->abd_parent = NULL;
#endif
abd->abd_size = 0;
}
static void
abd_fini_struct(abd_t *abd)
{
mutex_destroy(&abd->abd_mtx);
ASSERT(!list_link_active(&abd->abd_gang_link));
#ifdef ZFS_DEBUG
zfs_refcount_destroy(&abd->abd_children);
#endif
}
abd_t *
abd_alloc_struct(size_t size)
{
abd_t *abd = abd_alloc_struct_impl(size);
abd_init_struct(abd);
abd->abd_flags |= ABD_FLAG_ALLOCD;
return (abd);
}
void
abd_free_struct(abd_t *abd)
{
abd_fini_struct(abd);
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD, along with its own underlying data buffers. Use this if you
* don't care whether the ABD is linear or not.
*/
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
if (!zfs_abd_scatter_enabled || abd_size_alloc_linear(size))
return (abd_alloc_linear(size, is_metadata));
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd_t *abd = abd_alloc_struct(size);
abd->abd_flags |= ABD_FLAG_OWNER;
abd->abd_u.abd_scatter.abd_offset = 0;
abd_alloc_chunks(abd, size);
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
abd_update_scatter_stats(abd, ABDSTAT_INCR);
return (abd);
}
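/*
 * Illustrative sketch (not part of the original source): a minimal
 * consumer of abd_alloc()/abd_free().  Whether the returned ABD ends up
 * linear or scattered is an implementation detail the caller does not
 * need to care about; error handling is omitted for brevity.
 */
#if 0	/* example only */
static void
abd_alloc_example(void)
{
	/* Request a 128 KB data (non-metadata) buffer. */
	abd_t *abd = abd_alloc(128 * 1024, B_FALSE);

	/* ... hand the ABD to zio or another ABD-aware consumer ... */

	/* Frees both the underlying data and the abd_t itself. */
	abd_free(abd);
}
#endif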
/*
* Allocate an ABD that must be linear, along with its own underlying data
* buffer. Only use this when it would be very annoying to write your ABD
* consumer with a scattered ABD.
*/
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
if (is_metadata) {
ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
} else {
ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
return (abd);
}
static void
abd_free_linear(abd_t *abd)
{
if (abd_is_linear_page(abd)) {
abd_free_linear_page(abd);
return;
}
if (abd->abd_flags & ABD_FLAG_META) {
zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
} else {
zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
}
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
static void
abd_free_gang(abd_t *abd)
{
ASSERT(abd_is_gang(abd));
abd_t *cabd;
while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
/*
* We must acquire the child ABDs mutex to ensure that if it
* is being added to another gang ABD we will set the link
* as inactive when removing it from this gang ABD and before
* adding it to the other gang ABD.
*/
mutex_enter(&cabd->abd_mtx);
ASSERT(list_link_active(&cabd->abd_gang_link));
list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
mutex_exit(&cabd->abd_mtx);
if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
abd_free(cabd);
}
list_destroy(&ABD_GANG(abd).abd_gang_chain);
}
static void
abd_free_scatter(abd_t *abd)
{
abd_free_chunks(abd);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
* Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
* and abd_get_*(), including abd_get_offset_struct().
*
* If the ABD was created with abd_alloc_*(), the underlying data
* (scatterlist or linear buffer) will also be freed. (Subject to ownership
* changes via abd_*_ownership_of_buf().)
*
* Unless the ABD was created with abd_get_offset_struct(), the abd_t will
* also be freed.
*/
void
abd_free(abd_t *abd)
{
if (abd == NULL)
return;
abd_verify(abd);
#ifdef ZFS_DEBUG
IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
#endif
if (abd_is_gang(abd)) {
abd_free_gang(abd);
} else if (abd_is_linear(abd)) {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_linear(abd);
} else {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_scatter(abd);
}
#ifdef ZFS_DEBUG
if (abd->abd_parent != NULL) {
(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
abd->abd_size, abd);
}
#endif
abd_fini_struct(abd);
if (abd->abd_flags & ABD_FLAG_ALLOCD)
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD of the same format (same metadata flag, same scatterize
* setting) as another ABD.
*/
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
if (abd_is_linear(sabd) &&
!abd_is_linear_page(sabd)) {
return (abd_alloc_linear(size, is_metadata));
} else {
return (abd_alloc(size, is_metadata));
}
}
/*
* Create a gang ABD that will be the head of a list of ABD's. This is used
* to "chain" scatter/gather lists together when constructing aggregated
* IO's. To free this abd, abd_free() must be called.
*/
abd_t *
abd_alloc_gang(void)
{
abd_t *abd = abd_alloc_struct(0);
abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
return (abd);
}
/*
* Add a child gang ABD to a parent gang ABDs chained list.
*/
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
ASSERT(abd_is_gang(cabd));
if (free_on_free) {
/*
* If the parent is responsible for freeing the child gang
* ABD we will just splice the child's children ABD list to
* the parent's list and immediately free the child gang ABD
* struct. The parent gang ABDs children from the child gang
* will retain all the free_on_free settings after being
* added to the parents list.
*/
pabd->abd_size += cabd->abd_size;
list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
&ABD_GANG(cabd).abd_gang_chain);
ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
abd_verify(pabd);
abd_free(cabd);
} else {
for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
child != NULL;
child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
/*
* We always pass B_FALSE for free_on_free as it is the
* original child gang ABDs responsibility to determine
* if any of its child ABDs should be free'd on the call
* to abd_free().
*/
abd_gang_add(pabd, child, B_FALSE);
}
abd_verify(pabd);
}
}
/*
* Add a child ABD to a gang ABD's chained list.
*/
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
abd_t *child_abd = NULL;
/*
* If the child being added is a gang ABD, we will add the
* child's ABDs to the parent gang ABD. This allows us to account
* for the offset correctly in the parent gang ABD.
*/
if (abd_is_gang(cabd)) {
ASSERT(!list_link_active(&cabd->abd_gang_link));
ASSERT(!list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
return (abd_gang_add_gang(pabd, cabd, free_on_free));
}
ASSERT(!abd_is_gang(cabd));
/*
* In order to verify that an ABD is not already part of
* another gang ABD, we must lock the child ABD's abd_mtx
* to check its abd_gang_link status. We unlock the abd_mtx
* only after it has been added to a gang ABD, which
* will update the abd_gang_link's status. See comment below
* for how an ABD can be in multiple gang ABD's simultaneously.
*/
mutex_enter(&cabd->abd_mtx);
if (list_link_active(&cabd->abd_gang_link)) {
/*
* If the child ABD is already part of another
* gang ABD then we must allocate a new
* ABD to use a separate link. We mark the newly
* allocated ABD with ABD_FLAG_GANG_FREE, before
* adding it to the gang ABD's list, to make the
* gang ABD aware that it is responsible to call
* abd_free(). We use abd_get_offset() in order
* to just allocate a new ABD but avoid copying the
* data over into the newly allocated ABD.
*
* An ABD may become part of multiple gang ABD's. For
* example, when writing ditto blocks, the same ABD
* is used to write 2 or 3 locations with 2 or 3
* zio_t's. Each of the zio's may be aggregated with
* different adjacent zio's. zio aggregation uses gang
* zio's, so the single ABD can become part of multiple
* gang zio's.
*
* The ASSERT below is to make sure that if
* free_on_free is passed as B_TRUE, the ABD can
* not be in multiple gang ABD's. The gang ABD
* can not be responsible for cleaning up the child
* ABD memory allocation if the ABD can be in
* multiple gang ABD's at one time.
*/
ASSERT3B(free_on_free, ==, B_FALSE);
child_abd = abd_get_offset(cabd, 0);
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
} else {
child_abd = cabd;
if (free_on_free)
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
}
ASSERT3P(child_abd, !=, NULL);
list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
mutex_exit(&cabd->abd_mtx);
pabd->abd_size += child_abd->abd_size;
}
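/*
 * Illustrative sketch (not part of the original source): stitching two
 * existing ABDs into a gang ABD.  With free_on_free set to B_FALSE the
 * children remain owned by the caller, so freeing the gang head later
 * releases only the gang bookkeeping, not the children's data.
 */
#if 0	/* example only */
static abd_t *
abd_gang_example(abd_t *first, abd_t *second)
{
	abd_t *gang = abd_alloc_gang();

	abd_gang_add(gang, first, B_FALSE);
	abd_gang_add(gang, second, B_FALSE);
	/* gang->abd_size is now first->abd_size + second->abd_size. */
	return (gang);
}
#endif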
/*
* Locate the ABD for the supplied offset in the gang ABD.
* Return a new offset relative to the returned ABD.
*/
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
abd_t *cabd;
ASSERT(abd_is_gang(abd));
ASSERT3U(*off, <, abd->abd_size);
for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
if (*off >= cabd->abd_size)
*off -= cabd->abd_size;
else
return (cabd);
}
VERIFY3P(cabd, !=, NULL);
return (cabd);
}
/*
* Allocate a new ABD, using the provided struct (if non-NULL, and if
* circumstances allow - otherwise allocate the struct). The returned ABD will
* point to offset off of sabd. It shares the underlying buffer data with sabd.
* Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
*/
static abd_t *
abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_verify(sabd);
ASSERT3U(off + size, <=, sabd->abd_size);
if (abd_is_linear(sabd)) {
if (abd == NULL)
abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
} else if (abd_is_gang(sabd)) {
size_t left = size;
if (abd == NULL) {
abd = abd_alloc_gang();
} else {
abd->abd_flags |= ABD_FLAG_GANG;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
}
abd->abd_flags &= ~ABD_FLAG_OWNER;
for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
cabd != NULL && left > 0;
cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
int csize = MIN(left, cabd->abd_size - off);
abd_t *nabd = abd_get_offset_size(cabd, off, csize);
abd_gang_add(abd, nabd, B_TRUE);
left -= csize;
off = 0;
}
ASSERT3U(left, ==, 0);
} else {
abd = abd_get_offset_scatter(abd, sabd, off, size);
}
ASSERT3P(abd, !=, NULL);
abd->abd_size = size;
#ifdef ZFS_DEBUG
abd->abd_parent = sabd;
(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
#endif
return (abd);
}
/*
* Like abd_get_offset_size(), but memory for the abd_t is provided by the
* caller. Using this routine can improve performance by avoiding the cost
* of allocating memory for the abd_t struct, and updating the abd stats.
* Usually, the provided abd is returned, but in some circumstances (FreeBSD,
* if sabd is scatter and size is more than 2 pages) a new abd_t may need to
* be allocated. Therefore callers should be careful to use the returned
* abd_t*.
*/
abd_t *
abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_t *result;
abd_init_struct(abd);
result = abd_get_offset_impl(abd, sabd, off, size);
if (result != abd)
abd_fini_struct(abd);
return (result);
}
abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
VERIFY3U(size, >, 0);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
ASSERT3U(off + size, <=, sabd->abd_size);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
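/*
 * Illustrative sketch (not part of the original source): carving a 4 KB
 * window out of a larger ABD without copying any data.  It assumes sabd
 * is at least 12 KB so that off + size fits; the view must be freed with
 * abd_free() before the parent ABD goes away.
 */
#if 0	/* example only */
static void
abd_offset_example(abd_t *sabd)
{
	abd_t *view = abd_get_offset_size(sabd, 8 * 1024, 4 * 1024);

	/* ... read or write through the view ... */

	abd_free(view);	/* releases only the view, not sabd's data */
}
#endif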
/*
* Return a scatter ABD of the requested size containing only zeros.
*/
abd_t *
abd_get_zeros(size_t size)
{
ASSERT3P(abd_zero_scatter, !=, NULL);
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
return (abd_get_offset_size(abd_zero_scatter, 0, size));
}
/*
* Allocate a linear ABD structure for buf.
*/
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
/*
* Even if this buf is filesystem metadata, we only track that if we
* own the underlying data buffer, which is not true in this case.
* Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_size = size;
ABD_LINEAR_BUF(abd) = buf;
return (abd);
}
/*
* Get the raw buffer associated with a linear ABD.
*/
void *
abd_to_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
abd_verify(abd);
return (ABD_LINEAR_BUF(abd));
}
/*
* Borrow a raw buffer from an ABD without copying the contents of the ABD
* into the buffer. If the ABD is scattered, this will allocate a raw buffer
* whose contents are undefined. To copy over the existing data in the ABD, use
* abd_borrow_buf_copy() instead.
*/
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
void *buf;
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
buf = abd_to_buf(abd);
} else {
buf = zio_buf_alloc(n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
#endif
return (buf);
}
void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
void *buf = abd_borrow_buf(abd, n);
if (!abd_is_linear(abd)) {
abd_copy_to_buf(buf, abd, n);
}
return (buf);
}
/*
* Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
* not change the contents of the ABD and will ASSERT that you didn't modify
* the buffer since it was borrowed. If you want any changes you made to buf to
* be copied back to abd, use abd_return_buf_copy() instead.
*/
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
ASSERT3P(buf, ==, abd_to_buf(abd));
} else {
ASSERT0(abd_cmp_buf(abd, buf, n));
zio_buf_free(buf, n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
#endif
}
void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
if (!abd_is_linear(abd)) {
abd_copy_from_buf(abd, buf, n);
}
abd_return_buf(abd, buf, n);
}
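/*
 * Illustrative sketch (not part of the original source): the
 * borrow/return pattern for code that needs a contiguous buffer.  The
 * _copy variants preserve the existing contents on the way in and write
 * any modifications back on the way out, regardless of whether the ABD
 * is linear or scattered.
 */
#if 0	/* example only */
static void
abd_borrow_example(abd_t *abd, size_t size)
{
	char *buf = abd_borrow_buf_copy(abd, size);

	buf[0] ^= 0xff;				/* modify the flat copy */
	abd_return_buf_copy(abd, buf, size);	/* propagate the change */
}
#endif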
void
abd_release_ownership_of_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
/*
* abd_free() needs to handle LINEAR_PAGE ABD's specially.
* Since that flag does not survive the
* abd_release_ownership_of_buf() -> abd_get_from_buf() ->
* abd_take_ownership_of_buf() sequence, we don't allow releasing
* these "linear but not zio_[data_]buf_alloc()'ed" ABD's.
*/
ASSERT(!abd_is_linear_page(abd));
abd_verify(abd);
abd->abd_flags &= ~ABD_FLAG_OWNER;
/* Disable this flag since we no longer own the data buffer */
abd->abd_flags &= ~ABD_FLAG_META;
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
/*
* Give this ABD ownership of the buffer that it's storing. Can only be used on
* linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
* with abd_alloc_linear() which subsequently released ownership of their buf
* with abd_release_ownership_of_buf().
*/
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
ASSERT(abd_is_linear(abd));
ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
abd_verify(abd);
abd->abd_flags |= ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
}
/*
* Initializes an abd_iter based on whether the abd is a gang ABD
* or just a single ABD.
*/
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
abd_t *cabd = NULL;
if (abd_is_gang(abd)) {
cabd = abd_gang_get_offset(abd, &off);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, off);
}
} else {
abd_iter_init(aiter, abd);
abd_iter_advance(aiter, off);
}
return (cabd);
}
/*
* Advances an abd_iter. We have to be careful with gang ABD as
* advancing could mean that we are at the end of a particular ABD and
* must grab the ABD in the gang ABD's list.
*/
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
size_t len)
{
abd_iter_advance(aiter, len);
if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
ASSERT3P(cabd, !=, NULL);
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, 0);
}
}
return (cabd);
}
int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
abd_iter_func_t *func, void *private)
{
struct abd_iter aiter;
int ret = 0;
if (size == 0)
return (0);
abd_verify(abd);
ASSERT3U(off + size, <=, abd->abd_size);
boolean_t gang = abd_is_gang(abd);
abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);
while (size > 0) {
/* If we are at the end of the gang ABD we are done */
if (gang && !c_abd)
break;
abd_iter_map(&aiter);
size_t len = MIN(aiter.iter_mapsize, size);
ASSERT3U(len, >, 0);
ret = func(aiter.iter_mapaddr, len, private);
abd_iter_unmap(&aiter);
if (ret != 0)
break;
size -= len;
c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
}
return (ret);
}
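/*
 * Illustrative sketch (not part of the original source): a custom
 * abd_iterate_func() callback that counts non-zero bytes, in the same
 * style as the buf_arg callbacks below.  The callback sees each mapped
 * segment as a raw buffer and returns 0 to continue iterating.
 */
#if 0	/* example only */
static int
abd_count_nonzero_cb(void *buf, size_t size, void *private)
{
	uint64_t *count = private;
	const uint8_t *p = buf;

	for (size_t i = 0; i < size; i++) {
		if (p[i] != 0)
			(*count)++;
	}
	return (0);
}

static uint64_t
abd_count_nonzero(abd_t *abd)
{
	uint64_t count = 0;

	(void) abd_iterate_func(abd, 0, abd->abd_size,
	    abd_count_nonzero_cb, &count);
	return (count);
}
#endif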
struct buf_arg {
void *arg_buf;
};
static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(ba_ptr->arg_buf, buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy abd to buf. (off is the offset in abd.)
*/
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
struct buf_arg ba_ptr = { buf };
(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
&ba_ptr);
}
static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
int ret;
struct buf_arg *ba_ptr = private;
ret = memcmp(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (ret);
}
/*
* Compare the contents of abd to buf. (off is the offset in abd.)
*/
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}
static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy from buf to abd. (off is the offset in abd.)
*/
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
&ba_ptr);
}
-/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
+ (void) private;
(void) memset(buf, 0, size);
return (0);
}
/*
* Zero out the abd from a particular offset to the end.
*/
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}
/*
* Iterate over two ABDs and call func incrementally on the two ABDs' data in
* equal-sized chunks (passed to func as raw buffers). func could be called many
* times during this iteration.
*/
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
size_t size, abd_iter_func2_t *func, void *private)
{
int ret = 0;
struct abd_iter daiter, saiter;
boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
abd_t *c_dabd, *c_sabd;
if (size == 0)
return (0);
abd_verify(dabd);
abd_verify(sabd);
ASSERT3U(doff + size, <=, dabd->abd_size);
ASSERT3U(soff + size, <=, sabd->abd_size);
dabd_is_gang_abd = abd_is_gang(dabd);
sabd_is_gang_abd = abd_is_gang(sabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
while (size > 0) {
/* if we are at the end of the gang ABD we are done */
if ((dabd_is_gang_abd && !c_dabd) ||
(sabd_is_gang_abd && !c_sabd))
break;
abd_iter_map(&daiter);
abd_iter_map(&saiter);
size_t dlen = MIN(daiter.iter_mapsize, size);
size_t slen = MIN(saiter.iter_mapsize, size);
size_t len = MIN(dlen, slen);
ASSERT(dlen > 0 || slen > 0);
ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
private);
abd_iter_unmap(&saiter);
abd_iter_unmap(&daiter);
if (ret != 0)
break;
size -= len;
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
c_sabd =
abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
}
return (ret);
}
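/*
 * Illustrative sketch (not part of the original source): a two-buffer
 * callback for abd_iterate_func2() that XORs each source segment into
 * the corresponding destination segment, mirroring how abd_copy_off_cb()
 * below is wired up.
 */
#if 0	/* example only */
static int
abd_xor_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
	uint8_t *d = dbuf;
	const uint8_t *s = sbuf;

	(void) private;
	for (size_t i = 0; i < size; i++)
		d[i] ^= s[i];
	return (0);
}
#endif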
-/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
+ (void) private;
(void) memcpy(dbuf, sbuf, size);
return (0);
}
/*
* Copy from sabd to dabd starting from soff and doff.
*/
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
abd_copy_off_cb, NULL);
}
-/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
+ (void) private;
return (memcmp(bufa, bufb, size));
}
/*
* Compares the contents of two ABDs.
*/
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
abd_cmp_cb, NULL));
}
/*
* Iterate over code ABDs and a data ABD and call @func_raidz_gen.
*
* @cabds parity ABDs, must have equal size
* @dabd data ABD. Can be NULL (in this case @dsize = 0)
* @func_raidz_gen should be implemented so that its behaviour
* is the same whether it is given linear or scatter buffers
*/
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
ssize_t csize, ssize_t dsize, const unsigned parity,
void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
int i;
ssize_t len, dlen;
struct abd_iter caiters[3];
struct abd_iter daiter = {0};
void *caddrs[3];
unsigned long flags __maybe_unused = 0;
abd_t *c_cabds[3];
abd_t *c_dabd = NULL;
boolean_t cabds_is_gang_abd[3];
boolean_t dabd_is_gang_abd = B_FALSE;
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
}
if (dabd) {
dabd_is_gang_abd = abd_is_gang(dabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
}
ASSERT3S(dsize, >=, 0);
abd_enter_critical(flags);
while (csize > 0) {
/* if we are at the end of the gang ABD we are done */
if (dabd_is_gang_abd && !c_dabd)
break;
for (i = 0; i < parity; i++) {
/*
* If we are at the end of the gang ABD we are
* done.
*/
if (cabds_is_gang_abd[i] && !c_cabds[i])
break;
abd_iter_map(&caiters[i]);
caddrs[i] = caiters[i].iter_mapaddr;
}
len = csize;
if (dabd && dsize > 0)
abd_iter_map(&daiter);
switch (parity) {
case 3:
len = MIN(caiters[2].iter_mapsize, len);
fallthrough;
case 2:
len = MIN(caiters[1].iter_mapsize, len);
fallthrough;
case 1:
len = MIN(caiters[0].iter_mapsize, len);
}
/* must be progressive */
ASSERT3S(len, >, 0);
if (dabd && dsize > 0) {
/* this needs precise iter.length */
len = MIN(daiter.iter_mapsize, len);
dlen = len;
} else
dlen = 0;
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if each
* segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&caiters[i]);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&caiters[i], len);
}
if (dabd && dsize > 0) {
abd_iter_unmap(&daiter);
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter,
dlen);
dsize -= dlen;
}
csize -= len;
ASSERT3S(dsize, >=, 0);
ASSERT3S(csize, >=, 0);
}
abd_exit_critical(flags);
}
/*
* Iterate over code ABDs and data reconstruction target ABDs and call
* @func_raidz_rec. Function maps at most 6 pages atomically.
*
* @cabds parity ABDs, must have equal size
* @tabds rec target ABDs, at most 3
* @tsize size of data target columns
* @func_raidz_rec expects syndrome data in target columns. Function
* reconstructs data and overwrites target columns.
*/
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
ssize_t tsize, const unsigned parity,
void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
const unsigned *mul),
const unsigned *mul)
{
int i;
ssize_t len;
struct abd_iter citers[3];
struct abd_iter xiters[3];
void *caddrs[3], *xaddrs[3];
unsigned long flags __maybe_unused = 0;
boolean_t cabds_is_gang_abd[3];
boolean_t tabds_is_gang_abd[3];
abd_t *c_cabds[3];
abd_t *c_tabds[3];
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
c_cabds[i] =
abd_init_abd_iter(cabds[i], &citers[i], 0);
c_tabds[i] =
abd_init_abd_iter(tabds[i], &xiters[i], 0);
}
abd_enter_critical(flags);
while (tsize > 0) {
for (i = 0; i < parity; i++) {
/*
* If we are at the end of the gang ABD we
* are done.
*/
if (cabds_is_gang_abd[i] && !c_cabds[i])
break;
if (tabds_is_gang_abd[i] && !c_tabds[i])
break;
abd_iter_map(&citers[i]);
abd_iter_map(&xiters[i]);
caddrs[i] = citers[i].iter_mapaddr;
xaddrs[i] = xiters[i].iter_mapaddr;
}
len = tsize;
switch (parity) {
case 3:
len = MIN(xiters[2].iter_mapsize, len);
len = MIN(citers[2].iter_mapsize, len);
fallthrough;
case 2:
len = MIN(xiters[1].iter_mapsize, len);
len = MIN(citers[1].iter_mapsize, len);
fallthrough;
case 1:
len = MIN(xiters[0].iter_mapsize, len);
len = MIN(citers[0].iter_mapsize, len);
}
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if each
* segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_rec(xaddrs, len, caddrs, mul);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&xiters[i]);
abd_iter_unmap(&citers[i]);
c_tabds[i] =
abd_advance_abd_iter(tabds[i], c_tabds[i],
&xiters[i], len);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&citers[i], len);
}
tsize -= len;
ASSERT3S(tsize, >=, 0);
}
abd_exit_critical(flags);
}
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 149ceffc25b3..9984c2d27534 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,11141 +1,11155 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache from growing unbounded at these times, we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* arc_meta_limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed and the arc_meta_limit honored. For example,
* when using the ZPL each dentry holds a reference on a znode. These
* dentries must be pruned before the arc buffer holding the znode can
* be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list, however a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will bcopy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled, the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static arc_buf_hdr_t **arc_state_evict_markers;
static int arc_state_evict_marker_count;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
int zfs_arc_eviction_pct = 200;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
int zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
int arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;
/* shift of arc_c for calculating both min and max arc_p */
int arc_p_min_shift = 4;
/* log2(fraction of arc to reclaim) */
int arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
int arc_no_grow_shift = 5;
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static int arc_min_prefetch_ms;
static int arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
unsigned long zfs_arc_dnode_limit = 0;
unsigned long zfs_arc_dnode_reduce_percent = 10;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle.
*/
unsigned long zfs_arc_dirty_limit_percent = 50; /* total dirty data limit */
unsigned long zfs_arc_anon_limit_percent = 25; /* anon block dirty limit */
unsigned long zfs_arc_pool_dirty_percent = 20; /* each pool's anon allowance */
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* ARC will evict meta buffers that exceed arc_meta_limit. This
* tunable makes arc_meta_limit adjustable for different workloads.
*/
unsigned long zfs_arc_meta_limit_percent = 75;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
unsigned long zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux specific
*/
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_ms = 0;
int zfs_arc_min_prescient_prefetch_ms = 0;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
int zfs_arc_meta_adjust_restarts = 4096;
int zfs_arc_lotsfree_percent = 10;
+/*
+ * Number of arc_prune threads
+ */
+static int zfs_arc_prune_task_threads = 1;
+
/* The 6 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "p", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
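/*
 * Illustrative example (not part of the original source): a call such as
 *
 *	ARCSTAT_CONDSTAT(is_demand, demand, prefetch,
 *	    is_metadata, metadata, data, hits);
 *
 * bumps exactly one of arcstat_demand_metadata_hits,
 * arcstat_demand_data_hits, arcstat_prefetch_metadata_hits or
 * arcstat_prefetch_data_hits, matching the four hit counters declared in
 * arc_stats above.  Here is_demand and is_metadata stand for hypothetical
 * boolean expressions supplied by the caller.
 */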
/*
* This macro allows us to use kstats as floating averages. Each time we
* update this kstat, we first factor it and the update value by
* ARCSTAT_AVG_FACTOR to shrink the new value's contribution to the overall
* average. This macro assumes that integer loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
_NOTE(CONSTCOND) \
} while (0)
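/*
 * Illustrative worked example (not part of the original source): with
 * ARCSTAT_F_AVG_FACTOR of 3, a stored stat of 900 and an update value of
 * 300 yields x = 900 - 900/3 + 300/3 = 900 - 300 + 100 = 700, i.e. each
 * new sample contributes one third of its weight per update.
 */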
kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
/* max size for dnodes */
#define arc_dnode_size_limit ARCSTAT(arcstat_dnode_limit)
#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
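/*
 * Rough worked example (assuming the defaults defined here): a headroom
 * of 2 writes of L2ARC_WRITE_SIZE (8 MiB) gives 16 MiB of scan headroom;
 * a boost value of 200 (percent) roughly doubles that to 32 MiB when
 * compressible buffers are encountered.
 */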
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
* and each of the states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
int l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_DO_ADAPT = 0x2,
ARC_HDR_USE_RESERVE = 0x4,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static void arc_buf_watch(arc_buf_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
* will vary depending on how well the specific device handles
* these commands.
*/
unsigned long l2arc_trim_ahead = 0;
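/*
 * Worked example derived from the description above (not normative):
 * with l2arc_trim_ahead = 100 and the default 8 MiB write size, twice
 * the space required for upcoming writes (16 MiB) would be trimmed, but
 * the 64 MB floor applies, so at least 64 MB is trimmed ahead.
 */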
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case, do not write log blocks to the L2ARC in order
* not to waste space.
*/
int l2arc_rebuild_enabled = B_TRUE;
unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
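/*
 * Illustrative use only: the hash value feeds the BUF_HASH_INDEX() and
 * BUF_HASH_LOCK() macros defined above, e.g.
 *
 *	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 *	kmutex_t *lock = BUF_HASH_LOCK(idx);
 *
 * so headers with the same identity always map to the same bucket and
 * the same padded lock.
 */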
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to it in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
uint64_t he = atomic_inc_64_nv(
&arc_stats.arcstat_hash_elements.value.ui64);
ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
- int i;
-
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
- for (i = 0; i < BUF_LOCKS; i++)
+ for (int i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
-/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
-/* ARGSUSED */
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused;
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
}
-/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
-/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
arc_buf_t *buf = vbuf;
bzero(buf, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
-/* ARGSUSED */
static void
hdr_full_dest(void *vbuf, void *unused)
{
+ (void) unused;
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
-/* ARGSUSED */
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
+ (void) unused;
arc_buf_hdr_t *hdr = vbuf;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
}
-/* ARGSUSED */
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
- arc_buf_hdr_t *hdr __maybe_unused = vbuf;
+ (void) unused;
+ arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
-/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
+ (void) unused;
arc_buf_t *buf = vbuf;
mutex_destroy(&buf->b_evict_lock);
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
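/*
 * Rough worked example of the arithmetic above (illustrative only):
 * with 16 GiB of physical memory and the default 8 KiB average block
 * size, the loop below grows hsize to 2^21 (about 2 million) buckets,
 * i.e. a 16 MiB table of 8-byte pointers.
 */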
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
#define ARC_MINTIME (hz>>4) /* 62 ms */
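/*
 * E.g. assuming hz = 1000 (a common tick rate), hz >> 4 is 62 ticks,
 * i.e. roughly the 62 ms noted above.
 */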
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
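/*
 * E.g. (illustrative numbers only) for a compressed arc_buf_t backing a
 * 128 KiB block stored as 64 KiB on disk, arc_buf_size() above reports
 * 64 KiB, while arc_buf_lsize() always reports the logical 128 KiB.
 */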
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
IMPLY(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
+ (void) sig, (void) unused;
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
-/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
+#else
+ (void) buf;
#endif
}
-/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
+#else
+ (void) buf;
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in
* thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Holes and embedded blocks will always have a psize = 0 so
* we ignore the compression of the blkptr and simply mark
* them as uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
tmpbuf = zio_buf_alloc(lsize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmpbuf, lsize, hdr->b_complevel);
ASSERT3U(csize, <=, psize);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
* abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
-arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock)
+arc_buf_untransform_in_place(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
hdr->b_crypt_hdr.b_ebufcnt -= 1;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !ARC_BUF_SHARED(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
- arc_buf_untransform_in_place(buf, hash_lock);
+ arc_buf_untransform_in_place(buf);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (!arc_buf_is_shared(buf)) {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (arc_buf_is_shared(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, void *tag)
{
arc_state_t *state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
state = hdr->b_l1hdr.b_state;
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
multilist_remove(&state->arcs_list[arc_buf_type(hdr)],
hdr);
arc_evictable_space_decrement(hdr, state);
}
/* remove the prefetch flag if we get a reference */
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
ASSERT(!GHOST_STATE(state));
/*
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
arc_evictable_space_increment(hdr, state);
}
return (cnt);
}
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal,
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
+ (void) state_index;
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = l1hdr->b_bufcnt;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
kmutex_t *hash_lock)
{
arc_state_t *old_state;
int64_t refcnt;
uint32_t bufcnt;
boolean_t update_old, update_new;
arc_buf_contents_t buftype = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
} else {
old_state = arc_l2c_only;
refcnt = 0;
bufcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT3P(new_state, !=, old_state);
ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
ASSERT(old_state != arc_anon || bufcnt <= 1);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_remove(&old_state->arcs_list[buftype], hdr);
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_old = B_TRUE;
}
arc_evictable_space_decrement(hdr, old_state);
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[buftype], hdr);
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_new = B_TRUE;
}
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have a
* bufcnt of zero, and no arc buffer to use for
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
(void) zfs_refcount_add_many(&new_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(&old_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_buf_size(buf),
buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_hdr_size(hdr),
hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, HDR_GET_PSIZE(hdr),
hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABD's, not
* just those allocated by the ARC. But the vast majority of
* scatter ABD's come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
aggsum_add(&arc_sums.arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE) {
ASSERT(aggsum_compare(&arc_sums.arcstat_meta_used,
space) >= 0);
ARCSTAT_MAX(arcstat_meta_max,
aggsum_upper_bound(&arc_sums.arcstat_meta_used));
aggsum_add(&arc_sums.arcstat_meta_used, -space);
}
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
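/*
 * For example (illustrative only): an uncompressed buf on a compressed
 * hdr can never share (criterion 2 fails), while the first uncompressed
 * buf added to an uncompressed, native-endian, not-yet-shared hdr
 * normally satisfies all five criteria and shares the hdr's b_pabd.
 */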
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
void *tag, boolean_t encrypted, boolean_t compressed, boolean_t noauth,
boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
hdr->b_l1hdr.b_bufcnt += 1;
if (encrypted)
hdr->b_crypt_hdr.b_ebufcnt += 1;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
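/*
 * A minimal standalone sketch of the loan accounting implemented by
 * arc_loan_buf(), arc_return_buf() and arc_loan_inuse_buf() above:
 * every loan adds the buffer size to a global counter, every return
 * subtracts the same amount, and the counter may never go negative.
 * The real code uses atomic_add_64(); the names loaned_bytes, loan()
 * and return_loan() below are hypothetical, not part of ZFS.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <assert.h>
#include <stdint.h>

static int64_t loaned_bytes;

static void
loan(int64_t size)
{
	loaned_bytes += size;
}

static void
return_loan(int64_t size)
{
	loaned_bytes -= size;
	assert(loaned_bytes >= 0);	/* mirrors the ASSERT3S() above */
}
#endif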
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
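/*
 * A hedged sketch of the symmetric accounting performed by
 * arc_share_buf() and arc_unshare_buf() above: sharing credits the
 * hdr's compressed/uncompressed sizes and drops the per-buf overhead,
 * and unsharing reverses each increment exactly, so toggling the state
 * twice leaves every counter unchanged. The stats_t type and the
 * toggle_shared() helper are hypothetical, not ZFS code.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <stdint.h>

typedef struct {
	int64_t compressed;
	int64_t uncompressed;
	int64_t overhead;
} stats_t;

static void
toggle_shared(stats_t *st, int64_t hdr_size, int64_t lsize,
    int64_t buf_size, int now_shared)
{
	int64_t s = now_shared ? 1 : -1;

	st->compressed += s * hdr_size;
	st->uncompressed += s * lsize;
	st->overhead -= s * buf_size;	/* shared data is not "overhead" */
}
#endif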
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list then update the lastbuf and update
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
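/*
 * A standalone sketch of the single-pass removal pattern used by
 * arc_buf_remove() above: walk a singly linked list once, unlink the
 * target in place through a pointer-to-pointer, and remember the last
 * surviving node so the caller gets the new tail for free. The node_t
 * type and remove_and_find_last() name are hypothetical, not ZFS code.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <stddef.h>

typedef struct node {
	struct node *next;
} node_t;

static node_t *
remove_and_find_last(node_t **headp, node_t *target)
{
	node_t **np = headp;
	node_t *last = NULL;

	while (*np != NULL) {
		if (*np == target)
			*np = target->next;	/* unlink in place */
		if (*np != NULL) {		/* advance, tracking the tail */
			last = *np;
			np = &(*np)->next;
		}
	}
	target->next = NULL;
	return (last);		/* NULL when the list is now empty */
}
#endif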
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (arc_buf_is_shared(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf)) {
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
* If we have no more encrypted buffers and we've
* already gotten a copy of the decrypted data we can
* free b_rabd to save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
!HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
}
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
VERIFY(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must setup a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
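/*
 * A minimal sketch of the free-on-write idea used above: when another
 * agent (here, an in-flight L2ARC write) may still be reading a buffer,
 * the buffer is not freed immediately but queued on a list, under a
 * lock, for the writer to free once it is done. The deferred_t type and
 * defer_free() name are hypothetical; error handling is omitted.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <stdlib.h>

typedef struct deferred {
	struct deferred *next;
	void *ptr;
} deferred_t;

/* Caller holds the lock protecting *headp, as with l2arc_free_on_write_mtx. */
static void
defer_free(deferred_t **headp, void *ptr)
{
	deferred_t *d = malloc(sizeof (*d));

	d->ptr = ptr;
	d->next = *headp;
	*headp = d;		/* the writer walks and frees this list later */
}
#endif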
/*
* Allocate empty anonymous ARC header. The header will get its identity
* assigned and buffers attached later as part of read or write operations.
*
* In case of read, arc_read() assigns the header its identity (b_dva + b_birth),
* inserts it into ARC hash to become globally visible and allocates physical
* (b_pabd) or raw (b_rabd) ABD buffer to read into from disk. On disk read
* completion arc_read_done() allocates ARC buffer(s) as needed, potentially
* sharing one of them with the physical ABD buffer.
*
* In case of write arc_alloc_buf() allocates ARC buffer to be filled with
* data. Then after compression and/or encryption arc_write_ready() allocates
* and fills (or potentially shares) physical (b_pabd) or raw (b_rabd) ABD
* buffer. On disk write completion arc_write_done() assigns the header its
* new identity (b_dva + b_birth) and inserts into ARC hash.
*
* In case of partial overwrite the old data is read first as described. Then
* arc_release() either allocates new anonymous ARC header and moves the ARC
* buffer to it, or reuses the old ARC header by discarding its identity and
* removing it from ARC hash. After buffer modification normal write process
* follows as described.
*/
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
if (protected) {
hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
ASSERT(HDR_EMPTY(hdr));
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_buf = NULL;
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
/*
* If the caller wanted a new full header and the header is to be
* encrypted we will actually allocate the header from the full crypt
* cache instead. The same applies to freeing from the old cache.
*/
if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
new = hdr_full_crypt_cache;
if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
old = hdr_full_crypt_cache;
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set b_pabd to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might try to be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function allows an L1 header to be reallocated as a crypt
* header and vice versa. If we are going to a crypt header, the
* new fields will be zeroed out.
*/
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
arc_buf_hdr_t *nhdr;
arc_buf_t *buf;
kmem_cache_t *ncache, *ocache;
unsigned nsize, osize;
/*
* This function requires that hdr is in the arc_anon state.
* Therefore it won't have any L2ARC data for us to worry
* about copying.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node));
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (need_crypt) {
ncache = hdr_full_crypt_cache;
nsize = sizeof (hdr->b_crypt_hdr);
ocache = hdr_full_cache;
osize = HDR_FULL_SIZE;
} else {
ncache = hdr_full_cache;
nsize = HDR_FULL_SIZE;
ocache = hdr_full_crypt_cache;
osize = sizeof (hdr->b_crypt_hdr);
}
nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
/*
* Copy all members that aren't locks or condvars to the new header.
* No lists are pointing to us (as we asserted above), so we don't
* need to worry about the list nodes.
*/
nhdr->b_dva = hdr->b_dva;
nhdr->b_birth = hdr->b_birth;
nhdr->b_type = hdr->b_type;
nhdr->b_flags = hdr->b_flags;
nhdr->b_psize = hdr->b_psize;
nhdr->b_lsize = hdr->b_lsize;
nhdr->b_spa = hdr->b_spa;
nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
/*
* This zfs_refcount_add() exists only to ensure that the individual
* arc buffers always point to a header that is referenced, avoiding
* a small race condition that could trigger ASSERTs.
*/
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
mutex_enter(&buf->b_evict_lock);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
}
zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
if (need_crypt) {
arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
} else {
arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
}
/* unset all members of the original hdr */
bzero(&hdr->b_dva, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = ARC_BUFC_INVALID;
hdr->b_flags = 0;
hdr->b_psize = 0;
hdr->b_lsize = 0;
hdr->b_spa = 0;
hdr->b_l1hdr.b_freeze_cksum = NULL;
hdr->b_l1hdr.b_buf = NULL;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_byteswap = 0;
hdr->b_l1hdr.b_state = NULL;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_acb = NULL;
hdr->b_l1hdr.b_pabd = NULL;
if (ocache == hdr_full_crypt_cache) {
ASSERT(!HDR_HAS_RABD(hdr));
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
kmem_cache_free(ocache, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
if (!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to disk,
* it's easiest if we just set up sharing between the buf and the hdr.
*/
arc_share_buf(hdr, buf);
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
return (buf);
}
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can not
* be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (!HDR_PROTECTED(hdr)) {
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_full_crypt_cache, hdr);
}
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
void
arc_buf_destroy(arc_buf_t *buf, void* tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, NULL, tag));
arc_hdr_destroy(hdr);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
(void) remove_reference(hdr, hash_lock, tag);
arc_buf_destroy_impl(buf);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
*
* Return total size of evicted data buffers for eviction progress tracking.
* When evicting from ghost states return logical buffer size to make eviction
* progress at the same (or at least comparable) rate as from non-ghost states.
*
* Return *real_evicted for actual ARC size reduction to wake up threads
* waiting for it. For non-ghost states it includes size of evicted data
* buffers (the headers are not freed there). For ghost states it includes
* only the evicted headers size.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr, hash_lock);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
hdr = arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu);
evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime))) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
while (hdr->b_l1hdr.b_buf) {
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
ARCSTAT_BUMP(arcstat_mutex_miss);
break;
}
if (buf->b_data != NULL) {
bytes_evicted += HDR_GET_LSIZE(hdr);
*real_evicted += HDR_GET_LSIZE(hdr);
}
mutex_exit(&buf->b_evict_lock);
arc_buf_destroy_impl(buf);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
if (hdr->b_l1hdr.b_bufcnt == 0) {
arc_cksum_free(hdr);
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed
* buffer then we discard it here before we change states.
* This ensures that the accounting is updated correctly
* in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr, hash_lock);
ASSERT(HDR_IN_HASH_TABLE(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
}
return (bytes_evicted);
}
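/*
 * A worked example, with made-up sizes, of the two values returned
 * above. For a ghost-state header that is destroyed outright (no data,
 * only the header itself is freed), eviction progress is counted at the
 * logical block size while the waiters only see the header memory
 * actually released; the hdr_full_size constant below is hypothetical.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <stdint.h>

static void
ghost_evict_example(int64_t *bytes_evicted, uint64_t *real_evicted)
{
	const uint64_t lsize = 128 * 1024;	/* logical size of the block */
	const uint64_t hdr_full_size = 320;	/* hypothetical header size */

	*bytes_evicted = lsize;		/* drives eviction progress */
	*real_evicted = hdr_full_size;	/* memory actually freed */
}
#endif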
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
int evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count <= 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero is for the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, hash_lock,
&revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
cond_resched();
return (bytes_evicted);
}
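/*
 * A simplified sketch of the waiter wake-up performed above. Waiters
 * record the eviction count they are waiting for and are kept in
 * ascending order, so after bumping the count we can pop satisfied
 * waiters from the front and stop at the first one still waiting. The
 * free-memory gate and condition variables of the real code are
 * omitted; waiter_t and wake_satisfied_waiters() are hypothetical.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <stdint.h>

typedef struct {
	uint64_t target;	/* aew_count in the real code */
	int signalled;		/* stands in for cv_broadcast() */
} waiter_t;

static void
wake_satisfied_waiters(waiter_t *waiters, int nwaiters,
    uint64_t *evict_count, uint64_t just_evicted)
{
	*evict_count += just_evicted;
	for (int i = 0; i < nwaiters; i++) {
		if (waiters[i].target > *evict_count)
			break;		/* ordered list: the rest still wait */
		waiters[i].signalled = 1;
	}
}
#endif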
/*
* Allocate an array of buffer headers used as placeholders during arc state
* eviction.
*/
static arc_buf_hdr_t **
arc_state_alloc_markers(int count)
{
arc_buf_hdr_t **markers;
markers = kmem_zalloc(sizeof (*markers) * count, KM_SLEEP);
for (int i = 0; i < count; i++) {
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_type() and
* arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
}
return (markers);
}
static void
arc_state_free_markers(arc_buf_hdr_t **markers, int count)
{
for (int i = 0; i < count; i++)
kmem_cache_free(hdr_full_cache, markers[i]);
kmem_free(markers, sizeof (*markers) * count);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, uint64_t bytes,
arc_buf_contents_t type)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
if (zthr_iscurthread(arc_evict_zthr)) {
markers = arc_state_evict_markers;
ASSERT3S(num_sublists, <=, arc_state_evict_marker_count);
} else {
markers = arc_state_alloc_markers(num_sublists);
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Try to reduce pinned dnodes with a floor of arc_dnode_limit.
* Request that 10% of the LRUs be scanned by the superblock
* shrinker.
*/
if (type == ARC_BUFC_DATA && aggsum_compare(
&arc_sums.arcstat_dnode_size, arc_dnode_size_limit) > 0) {
arc_prune_async((aggsum_upper_bound(
&arc_sums.arcstat_dnode_size) -
arc_dnode_size_limit) / sizeof (dnode_t) /
zfs_arc_dnode_reduce_percent);
}
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
}
if (markers != arc_state_evict_markers)
arc_state_free_markers(markers, num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified,
* restricting eviction to the spa and type given. This function
* prevents us from trying to evict more from a state's list than
* is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
arc_buf_contents_t type)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, spa, delta, type));
}
return (0);
}
/*
* The goal of this function is to evict enough meta data buffers from the
* ARC in order to enforce the arc_meta_limit. Achieving this is slightly
* more complicated than it appears because it is common for data buffers
* to have holds on meta data buffers. In addition, dnode meta data buffers
* will be held by the dnodes in the block preventing them from being freed.
* This means we can't simply traverse the ARC and expect to always find
* enough unheld meta data buffer to release.
*
* Therefore, this function has been updated to make alternating passes
* over the ARC releasing data buffers and then newly unheld meta data
* buffers. This ensures forward progress is maintained and meta_used
* will decrease. Normally this is sufficient, but if required the ARC
* will call the registered prune callbacks causing dentry and inodes to
* be dropped from the VFS cache. This will make dnode meta data buffers
* available for reclaim.
*/
static uint64_t
arc_evict_meta_balanced(uint64_t meta_used)
{
int64_t delta, prune = 0, adjustmnt;
uint64_t total_evicted = 0;
arc_buf_contents_t type = ARC_BUFC_DATA;
int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
restart:
/*
* This differs slightly from the way we evict from the mru in
* arc_evict because we don't have a "target" value (i.e. no
* "meta" arc_p). As a result, I think we can completely
* cannibalize the metadata in the MRU before we evict the
* metadata from the MFU. I think we probably need to implement a
* "metadata arc_p" value to do this properly.
*/
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
}
/*
* We can't afford to recalculate adjustmnt here. If we do,
* new metadata buffers can sneak into the MRU or ANON lists,
* thus penalizing the MFU metadata. Although the fudge factor is
* small, it has been empirically shown to be significant for
* certain workloads (e.g. creating many empty directories). As
* such, we use the original calculation for adjustmnt, and
* simply decrement the amount of data evicted from the MRU.
*/
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mfu, 0, delta, type);
}
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mru_ghost, 0, delta, type);
adjustmnt -= delta;
}
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mfu_ghost, 0, delta, type);
}
/*
* If after attempting to make the requested adjustment to the ARC
* the meta limit is still being exceeded then request that the
* higher layers drop some cached objects which have holds on ARC
* meta buffers. Requests to the upper layers will be made with
* increasingly large scan sizes until the ARC is below the limit.
*/
if (meta_used > arc_meta_limit) {
if (type == ARC_BUFC_DATA) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
if (zfs_arc_meta_prune) {
prune += zfs_arc_meta_prune;
arc_prune_async(prune);
}
}
if (restarts > 0) {
restarts--;
goto restart;
}
}
return (total_evicted);
}
/*
* Evict metadata buffers from the cache, such that arcstat_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
arc_evict_meta_only(uint64_t meta_used)
{
uint64_t total_evicted = 0;
int64_t target;
/*
* If we're over the meta limit, we want to evict enough
* metadata to get back under the meta limit. We don't want to
* evict so much that we drop the MRU below arc_p, though. If
* we're over the meta limit more than we're over arc_p, we
* evict some from the MRU here, and some from the MFU below.
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
total_evicted += arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
/*
* Similar to the above, we want to evict enough bytes to get us
* below the meta limit, but not so much as to drop us below the
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
(arc_c - arc_p)));
total_evicted += arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
static uint64_t
arc_evict_meta(uint64_t meta_used)
{
if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
return (arc_evict_meta_only(meta_used));
else
return (arc_evict_meta_balanced(meta_used));
}
/*
* Return the type of the oldest buffer in the given arc state
*
* This function will select a random sublist of type ARC_BUFC_DATA and
* a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
* is compared, and the type which contains the "older" buffer will be
* returned.
*/
static arc_buf_contents_t
arc_evict_type(arc_state_t *state)
{
multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
int data_idx = multilist_get_random_index(data_ml);
int meta_idx = multilist_get_random_index(meta_ml);
multilist_sublist_t *data_mls;
multilist_sublist_t *meta_mls;
arc_buf_contents_t type;
arc_buf_hdr_t *data_hdr;
arc_buf_hdr_t *meta_hdr;
/*
* We keep the sublist lock until we're finished, to prevent
* the headers from being destroyed via arc_evict_state().
*/
data_mls = multilist_sublist_lock(data_ml, data_idx);
meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
/*
* These two loops are to ensure we skip any markers that
* might be at the tail of the lists due to arc_evict_state().
*/
for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
if (data_hdr->b_spa != 0)
break;
}
for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
if (meta_hdr->b_spa != 0)
break;
}
if (data_hdr == NULL && meta_hdr == NULL) {
type = ARC_BUFC_DATA;
} else if (data_hdr == NULL) {
ASSERT3P(meta_hdr, !=, NULL);
type = ARC_BUFC_METADATA;
} else if (meta_hdr == NULL) {
ASSERT3P(data_hdr, !=, NULL);
type = ARC_BUFC_DATA;
} else {
ASSERT3P(data_hdr, !=, NULL);
ASSERT3P(meta_hdr, !=, NULL);
/* The headers can't be on the sublist without an L1 header */
ASSERT(HDR_HAS_L1HDR(data_hdr));
ASSERT(HDR_HAS_L1HDR(meta_hdr));
if (data_hdr->b_l1hdr.b_arc_access <
meta_hdr->b_l1hdr.b_arc_access) {
type = ARC_BUFC_DATA;
} else {
type = ARC_BUFC_METADATA;
}
}
multilist_sublist_unlock(meta_mls);
multilist_sublist_unlock(data_mls);
return (type);
}
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t total_evicted = 0;
uint64_t bytes;
int64_t target;
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* If we're over arc_meta_limit, we want to correct that before
* potentially evicting data buffers below.
*/
total_evicted += arc_evict_meta(ameta);
/*
* Adjust MRU size
*
* If we're over the target cache size, we want to evict enough
* from the list to get back to our target size. We don't want
* to evict too much from the MRU, such that it drops below
* arc_p. So, if we're over our target cache size more than
* the MRU is over arc_p, we'll evict enough to get back to
* arc_p here, and then evict more from the MFU below.
*/
target = MIN((int64_t)(asize - arc_c),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
* Otherwise, try to satisfy the requested number of bytes to
* evict from the type which contains older buffers; in an
* effort to keep newer buffers in the cache regardless of their
* type. If we cannot satisfy the number of bytes from this
* type, spill over into the next type.
*/
if (arc_evict_type(arc_mru) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
}
/*
* Re-sum ARC stats after the first round of evictions.
*/
asize = aggsum_value(&arc_sums.arcstat_size);
ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* Adjust MFU size
*
* Now that we've tried to evict enough from the MRU to get its
* size back to arc_p, if we're still above the target cache
* size, we evict the rest from the MFU.
*/
target = asize - arc_c;
if (arc_evict_type(arc_mfu) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
}
/*
* Adjust ghost lists
*
* In addition to the above, the ARC also defines target values
* for the ghost lists. The sum of the mru list and mru ghost
* list should never exceed the target size of the cache, and
* the sum of the mru list, mfu list, mru ghost list, and mfu
* ghost list should never exceed twice the target size of the
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
target = zfs_refcount_count(&arc_mru->arcs_size) +
zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
/*
* We assume the sum of the mru list and mfu list is less than
* or equal to arc_c (we enforced this above), which means we
* can use the simpler of the two equations below:
*
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
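/*
 * A worked example, with hypothetical sizes, of the MRU eviction target
 * computed at the top of arc_evict(): evict no more than the amount we
 * are over the cache target, and no more than the amount the MRU (plus
 * anon and metadata) is over arc_p.
 */
#if 0	/* illustration only; not compiled into arc.c */
#include <stdint.h>
#define	GiB	(1024ULL * 1024 * 1024)

static int64_t
mru_evict_target_example(void)
{
	int64_t asize = 10 * GiB, arc_c = 8 * GiB, arc_p = 4 * GiB;
	int64_t anon_plus_mru = 5 * GiB, ameta = 1 * GiB;
	int64_t over_cache = asize - arc_c;			/* 2 GiB */
	int64_t mru_over_p = anon_plus_mru + ameta - arc_p;	/* 2 GiB */

	/* MIN(over_cache, mru_over_p) == 2 GiB evicted from the MRU first */
	return (over_cache < mru_over_p ? over_cache : mru_over_p);
}
#endif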
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == 0);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
void
arc_reduce_target_size(int64_t to_free)
{
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
uint64_t c = MIN(arc_c, asize);
if (c > to_free && c - to_free > arc_c_min) {
arc_c = c - to_free;
atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
if (arc_p > arc_c)
arc_p = (arc_c >> 1);
ASSERT(arc_c >= arc_c_min);
ASSERT((int64_t)arc_p >= 0);
} else {
arc_c = arc_c_min;
}
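/*
* Editor's note (illustrative, not in the original source): assume
* arc_c = 8 GiB, arc_size (asize) = 3 GiB, to_free = 1 GiB and
* arc_c_min = 1 GiB. Then c = MIN(arc_c, asize) = 3 GiB and the new
* arc_c = c - to_free = 2 GiB, so arc_size now exceeds arc_c and the
* eviction thread is woken below. Reducing from arc_c alone (8 GiB)
* would have left arc_c at 7 GiB and triggered no eviction.
*/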
if (asize > arc_c) {
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
}
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
#ifdef _KERNEL
if ((aggsum_compare(&arc_sums.arcstat_meta_used,
arc_meta_limit) >= 0) && zfs_arc_meta_prune) {
/*
* We are exceeding our meta-data cache limit.
* Prune some entries to release holds on meta-data.
*/
arc_prune_async(zfs_arc_meta_prune);
}
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
-/* ARGSUSED */
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently; but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
return (arc_evict_needed);
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
-/* ARGSUSED */
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Evict from cache */
evicted = arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
-/* ARGSUSED */
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called unconditionally every 60 seconds to reclaim unused
* zstd compression and decompression context. This is done
* here to avoid the need for an independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
-/* ARGSUSED */
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
+ (void) arg, (void) zthr;
+
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
int64_t to_free =
(arc_c >> arc_shrink_shift) - free_memory;
if (to_free > 0) {
arc_reduce_target_size(to_free);
}
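/*
* Editor's note (illustrative, not in the original source): with the
* default arc_shrink_shift of 7, an arc_c of 8 GiB targets
* 8 GiB / 128 = 64 MiB of free memory. If arc_available_memory()
* reports only 16 MiB free, to_free = 64 - 16 = 48 MiB; if it reports
* a 32 MiB deficit (-32 MiB), to_free = 64 + 32 = 96 MiB.
*/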
spl_fstrans_unmark(cookie);
}
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
#endif /* _KERNEL */
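/*
* Editor's note (illustrative, not in the original source): a numeric
* sketch of case 2.1 above, with assumed sizes. Take arc_c_min = 1 GiB,
* arc_size = 1.5 GiB and 256 MiB of dirty data. Since dirty data
* (256 MiB) < arc_c_min (1 GiB) but arc_size >= arc_c_min, at most
* arc_size - arc_c_min = 512 MiB of the clean mru/mfu data may be
* reported as evictable, even though more of it is clean.
*/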
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(int bytes, arc_state_t *state)
{
int mult;
uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
ASSERT(bytes > 0);
/*
* Adapt the target size of the MRU list:
* - if we just hit in the MRU ghost list, then increase
* the target size of the MRU list.
* - if we just hit in the MFU ghost list, then increase
* the target size of the MFU list by decreasing the
* target size of the MRU list.
*/
if (state == arc_mru_ghost) {
mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
} else if (state == arc_mfu_ghost) {
uint64_t delta;
mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10);
delta = MIN(bytes * mult, arc_p);
arc_p = MAX(arc_p_min, arc_p - delta);
}
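/*
* Editor's note (illustrative, not in the original source): assume a
* 4 KiB hit in the MRU ghost list with mru_ghost = 100 MiB and
* mfu_ghost = 300 MiB. Then mult = 300 / 100 = 3 (capped at 10 unless
* zfs_arc_p_dampener_disable is set), so arc_p grows by
* 4 KiB * 3 = 12 KiB, clamped to arc_c - arc_p_min. A hit in the MFU
* ghost list shrinks arc_p symmetrically.
*/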
ASSERT((int64_t)arc_p >= 0);
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
if (aggsum_upper_bound(&arc_sums.arcstat_size) >=
arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
atomic_add_64(&arc_c, (int64_t)bytes);
if (arc_c > arc_c_max)
arc_c = arc_c_max;
else if (state == arc_anon)
atomic_add_64(&arc_p, (int64_t)bytes);
if (arc_p > arc_c)
arc_p = arc_c;
}
ASSERT((int64_t)arc_p >= 0);
}
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static arc_ovf_level_t
arc_is_overflowing(boolean_t use_reserve)
{
/* Always allow at least one block of overflow */
int64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
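/*
* Editor's note (illustrative, not in the original source): assuming
* arc_c = 4 GiB and the default zfs_arc_overflow_shift of 8,
* overflow = MAX(16 MiB, 4 GiB >> 8) = 16 MiB. If the lower bound of
* arc_size is arc_c + 4 MiB, over = 4 - 8 = -4 MiB and the result is
* ARC_OVF_NONE; at arc_c + 12 MiB, over = 4 MiB and the result is
* ARC_OVF_SOME; at arc_c + 40 MiB, over = 32 MiB >= 16 MiB and the
* result is ARC_OVF_SEVERE (without the reserve, the SOME/SEVERE
* threshold halves to 8 MiB).
*/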
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) -
arc_c - overflow / 2;
if (!use_reserve)
overflow /= 2;
return (over < 0 ? ARC_OVF_NONE :
over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
int alloc_flags)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, alloc_flags);
if (type == ARC_BUFC_METADATA) {
return (abd_alloc(size, B_TRUE));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (abd_alloc(size, B_FALSE));
}
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, ARC_HDR_DO_ADAPT);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system. Waiting for
* eviction ensures that the memory used by the ARC decreases. Waiting for
* free memory ensures that the system won't run out of free pages, regardless
* of ARC behavior and settings. See arc_lowmem_init().
*/
void
arc_wait_for_eviction(uint64_t amount, boolean_t use_reserve)
{
switch (arc_is_overflowing(use_reserve)) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
* This is a bit racy without taking arc_evict_lock, but the
* worst that can happen is that we either call zthr_wakeup()
* an extra time due to a race with another thread here, or
* that the flag we set gets cleared by arc_evict_cb(), which
* is unlikely given the large hysteresis, and also unimportant
* since at this level of overflow the eviction is purely
* advisory. Taking the global lock here on every call, without
* waiting for the actual eviction, would create significant
* lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
int alloc_flags)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
if (alloc_flags & ARC_HDR_DO_ADAPT)
arc_adapt(size, state);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, to
* ensure that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
alloc_flags & ARC_HDR_USE_RESERVE);
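/*
* Editor's note (illustrative, not in the original source): assuming
* the default zfs_arc_eviction_pct of 200, a 128 KiB allocation waits
* for 128 KiB * 200 / 100 = 256 KiB to be evicted (when the ARC is
* severely overflowing), so each allocation also makes net progress
* toward bringing arc_size back under arc_c.
*/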
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size, size, tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
/*
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
if (aggsum_upper_bound(&arc_sums.arcstat_size) < arc_c &&
hdr->b_l1hdr.b_state == arc_anon &&
(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
arc_p = MIN(arc_c, arc_p + size);
}
}
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
* NOTE: the hash lock is dropped in this function.
*/
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
clock_t now;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
if (hdr->b_l1hdr.b_state == arc_anon) {
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr, hash_lock);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
now = ddi_get_lbolt();
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
* (any subsequent access will bump this into the MFU state).
* or
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* This buffer has been "accessed" only once so far,
* but it is still in the cache. Move it to the MFU
* state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
}
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been "accessed" recently, but
* was evicted from the cache. Move it to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
arc_change_state(new_state, hdr, hash_lock);
hdr->b_l1hdr.b_mru_ghost_hits++;
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
*
* NOTE: an add_reference() that occurred when we did
* the arc_read() will have kicked this off the list.
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
hdr->b_l1hdr.b_mfu_hits++;
ARCSTAT_BUMP(arcstat_mfu_hits);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
* This buffer has been accessed more than once but has
* been evicted from the cache. Move it back to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
new_state = arc_mru;
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(new_state, hdr, hash_lock);
hdr->b_l1hdr.b_mfu_ghost_hits++;
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC.
*/
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
mutex_enter(&buf->b_evict_lock);
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(&buf->b_evict_lock);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
mutex_exit(&buf->b_evict_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
mutex_exit(&buf->b_evict_lock);
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
-/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
+ (void) zio, (void) zb, (void) bp;
+
if (buf == NULL)
return;
bcopy(buf->b_data, arg, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
-/* ARGSUSED */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
+ (void) zb, (void) bp;
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if we were freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
if (hash_lock && zio->io_error == 0 &&
hdr->b_l1hdr.b_state == arc_anon) {
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
* called arc_access (to prevent any simultaneous readers from
* getting confused).
*/
arc_access(hdr, hash_lock);
}
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
hdr->b_l1hdr.b_acb = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (callback_cnt == 0)
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
cv_broadcast(&hdr->b_l1hdr.b_cv);
if (hash_lock != NULL) {
mutex_exit(hash_lock);
} else {
/*
* This block was freed while we waited for the read to
* complete. It has been removed from the hash table and
* moved to the anonymous state (so that it won't show up
* in the cache).
*/
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_next;
kmem_free(acb, sizeof (arc_callback_t));
}
if (freeable)
arc_hdr_destroy(hdr);
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/cleaning the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
* on the hash_lock always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
/*
* Verify the block pointer contents are reasonable. This should
* always be the case since the blkptr is protected by a checksum.
* However, if there is damage it's desirable to detect this early
* and treat it as a checksum error. This allows an alternate blkptr
* to be tried when one is available (e.g. ditto blocks).
*/
if (!zfs_blkptr_verify(spa, bp, zio_flags & ZIO_FLAG_CONFIG_WRITER,
BLK_VERIFY_LOG)) {
rc = SET_ERROR(ECKSUM);
goto out;
}
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read".
* Create an anonymous arc buf to back it.
*/
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
arc_buf_t *buf = NULL;
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto out;
}
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
if (done) {
arc_callback_t *acb = NULL;
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
ASSERT3P(acb->acb_done, !=, NULL);
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
if (done && !no_buf) {
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
/*
* This is a demand read which does not have to
* wait for i/o because we did a predictive
* prefetch i/o for it, which has completed.
*/
DTRACE_PROBE1(
arc__demand__hit__predictive__prefetch,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(
arcstat_demand_hit_predictive_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
ARCSTAT_BUMP(
arcstat_demand_hit_prescient_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PRESCIENT_PREFETCH);
}
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
(void) remove_reference(hdr, hash_lock,
private);
arc_buf_destroy_impl(buf);
buf = NULL;
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
data, metadata, hits);
if (done)
done(NULL, zb, bp, buf, private);
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
rc = SET_ERROR(ENOENT);
if (hash_lock != NULL)
mutex_exit(hash_lock);
goto out;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
alloc_flags |= ARC_HDR_DO_ADAPT;
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* This is a delicate dance that we play here.
* This hdr might be in the ghost list so we access
* it to move it out of the ghost list before we
* initiate the read. If it's a prefetch then
* it won't have a callback so we'll remove the
* reference that arc_buf_alloc_impl() created. We
* do this after we've called arc_access() to
* avoid hitting an assert in remove_reference().
*/
arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
arc_access(hdr, hash_lock);
}
arc_hdr_alloc_abd(hdr, alloc_flags);
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(size, 1);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
* 5. This isn't prefetch or l2arc_noprefetch is 0.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
!(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
hdr->b_l2hdr.b_hits++;
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
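/*
* Editor's note (illustrative, not in the original source): asize is
* the on-disk allocation size, which may exceed the requested size;
* e.g. a 6 KiB compressed block on an ashift=12 cache device rounds
* up to an 8 KiB asize. In that case a temporary 8 KiB ABD is
* allocated here and the padding is discarded after the read
* completes in l2arc_read_done().
*/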
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
}
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or has a reference (i.e. a dedup-ed,
* dmu_sync-ed block). If this block is being prefetched, then it
* would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
* until the I/O completes. A block may also have a reference if it is
* part of a dedup-ed, dmu_synced write. The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
mutex_enter(&buf->b_evict_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
mutex_exit(&buf->b_evict_lock);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held, we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_bufcnt > 1) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
(void) remove_reference(hdr, hash_lock, tag);
if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (arc_buf_is_shared(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
VERIFY(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!ARC_BUF_SHARED(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size,
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf))
hdr->b_crypt_hdr.b_ebufcnt -= 1;
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
/*
* Allocate a new hdr. The new hdr will contain a b_pabd
* buffer which will be freed in arc_write().
*/
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
if (ARC_BUF_ENCRYPTED(buf))
nhdr->b_crypt_hdr.b_ebufcnt = 1;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
(void) zfs_refcount_add_many(&arc_anon->arcs_size,
arc_buf_size(buf), buf);
} else {
mutex_exit(&buf->b_evict_lock);
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
arc_change_state(arc_anon, hdr, hash_lock);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
int released;
mutex_enter(&buf->b_evict_lock);
released = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
mutex_exit(&buf->b_evict_lock);
return (released);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
int referenced;
mutex_enter(&buf->b_evict_lock);
referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
mutex_exit(&buf->b_evict_lock);
return (referenced);
}
#endif
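/*
 * The zio "ready" callback for writes issued by arc_write(). It invokes the
 * caller's ready callback, records the block pointer's encryption and
 * compression parameters in the header, and then fills the header's b_pabd
 * (or b_rabd for raw encrypted writes) with the data being written.
 */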
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
* If we're reexecuting this zio because the pool suspended, then
* clean up any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr))
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(HDR_PROTECTED(hdr));
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, so we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_ALLOC_RDATA | ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_USE_RESERVE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
arc_write_physdone(zio_t *zio)
{
arc_write_callback_t *cb = zio->io_private;
if (cb->awcb_physdone != NULL)
cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
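/*
 * The zio "done" callback for writes issued by arc_write(). On success the
 * header takes on the identity (DVA and birth TXG) of the written block and
 * is inserted into the hash table; holes, embedded blocks and failed writes
 * leave the header anonymous.
 */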
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists, hash_lock);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
}
ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
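/*
 * Issue a write for an anonymous ARC buffer. The buffer's data is handed
 * to the ZIO pipeline; a fresh b_pabd is allocated once the pipeline calls
 * arc_write_ready(), and the header is given its on-disk identity in
 * arc_write_done().
 */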
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_physdone = physdone;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale; free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_physdone, arc_write_done, callback,
priority, zio_flags, zb);
return (zio);
}
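/*
 * Release a reservation previously obtained via arc_tempreserve_space().
 */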
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
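/*
 * Reserve space in the ARC for dirty data belonging to the given TXG,
 * returning ERESTART to throttle the caller when memory is low or the
 * amount of anonymous dirty data grows too large.
 */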
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);
/*
* Writes will almost always require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
* data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
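/*
 * Copy an ARC state's size and evictable data/metadata counters into the
 * corresponding kstat fields.
 */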
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
size->value.ui64 = zfs_refcount_count(&state->arcs_size);
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
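/*
 * kstat update callback: snapshot the wmsum/aggsum counters into arc_stats.
 * The arcstats kstat is read-only; writes return EACCES.
 */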
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
aggsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
aggsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
static unsigned int
arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj)
{
panic("Header %p insert into arc_l2c_only %p", obj, ml);
}
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
(#tuning), (value)); \
} \
} while (0)
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
unsigned long limit;
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
arc_p = (arc_c >> 1);
if (arc_meta_limit > arc_c_max)
arc_meta_limit = arc_c_max;
if (arc_dnode_size_limit > arc_meta_limit)
arc_dnode_size_limit = arc_meta_limit;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 16M - <arc_c_max> */
if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
(zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_meta_min <= arc_c_max)) {
arc_meta_min = zfs_arc_meta_min;
if (arc_meta_limit < arc_meta_min)
arc_meta_limit = arc_meta_min;
if (arc_dnode_size_limit < arc_meta_min)
arc_dnode_size_limit = arc_meta_min;
}
WARN_IF_TUNING_IGNORED(zfs_arc_meta_min, arc_meta_min, verbose);
/* Valid range: <arc_meta_min> - <arc_c_max> */
limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
if ((limit != arc_meta_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_c_max))
arc_meta_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_meta_limit, arc_meta_limit, verbose);
/* Valid range: <arc_meta_min> - <arc_meta_limit> */
limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
if ((limit != arc_dnode_size_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_meta_limit))
arc_dnode_size_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_size_limit,
verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N */
if (zfs_arc_p_min_shift)
arc_p_min_shift = zfs_arc_p_min_shift;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if ((zfs_arc_lotsfree_percent >= 0) &&
(zfs_arc_lotsfree_percent <= 100))
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
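/*
 * Create one ARC state multilist and track the largest sublist count seen,
 * which bounds the number of eviction markers that must be pre-allocated.
 */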
static void
arc_state_multilist_init(multilist_t *ml,
multilist_sublist_index_func_t *index_func, int *maxcountp)
{
multilist_create(ml, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), index_func);
*maxcountp = MAX(*maxcountp, multilist_get_num_sublists(ml));
}
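/*
 * Initialize the ARC states: their multilists, refcounts, and the
 * arc_sums statistics counters.
 */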
static void
arc_state_init(void)
{
int num_sublists = 0;
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
arc_state_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
arc_state_multilist_index_func, &num_sublists);
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated. Special index function asserts that.
*/
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
arc_state_l2c_multilist_index_func, &num_sublists);
arc_state_multilist_init(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
arc_state_l2c_multilist_index_func, &num_sublists);
/*
* Keep track of the number of markers needed to reclaim buffers from
* any ARC state. The markers will be pre-allocated so as to minimize
* the number of memory allocations performed by the eviction thread.
*/
arc_state_evict_marker_count = num_sublists;
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size);
zfs_refcount_create(&arc_mru->arcs_size);
zfs_refcount_create(&arc_mru_ghost->arcs_size);
zfs_refcount_create(&arc_mfu->arcs_size);
zfs_refcount_create(&arc_mfu_ghost->arcs_size);
zfs_refcount_create(&arc_l2c_only->arcs_size);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
aggsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
aggsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
}
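/*
 * Tear down everything created by arc_state_init().
 */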
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size);
zfs_refcount_destroy(&arc_mru->arcs_size);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
zfs_refcount_destroy(&arc_mfu->arcs_size);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
zfs_refcount_destroy(&arc_l2c_only->arcs_size);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
aggsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
aggsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
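/*
 * Return the current ARC target size (arc_c).
 */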
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
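/*
 * Perform one-time initialization of the ARC: set size limits, apply
 * tunables, and create the ARC states, kstats, and worker threads.
 */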
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifdef _KERNEL
/*
* If zfs_arc_max is non-zero at init, meaning it was set in the kernel
* environment before the module was loaded, don't block setting the
* maximum because it is less than arc_c_min; instead, reset arc_c_min
* to a lower value.
* zfs_arc_min will be handled by arc_tuning_update().
*/
if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX &&
zfs_arc_max < allmem) {
arc_c_max = zfs_arc_max;
if (arc_c_min >= arc_c_max) {
arc_c_min = MAX(zfs_arc_max / 2,
2ULL << SPA_MAXBLOCKSHIFT);
}
}
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
arc_p = (arc_c >> 1);
/* Set min to 1/2 of arc_c_min */
arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
/*
* Set arc_meta_limit to a percent of arc_c_max with a floor of
* arc_meta_min, and a ceiling of arc_c_max.
*/
percent = MIN(zfs_arc_meta_limit_percent, 100);
arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_size_limit = (percent * arc_meta_limit) / 100;
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, let's try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
- arc_prune_taskq = taskq_create("arc_prune", 100, defclsyspri,
- boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
- TASKQ_THREADS_CPU_PCT);
+ arc_prune_taskq = taskq_create("arc_prune", zfs_arc_prune_task_threads,
+ defclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_state_evict_markers =
arc_state_alloc_markers(arc_state_evict_marker_count);
arc_evict_zthr = zthr_create("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL, defclsyspri);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
}
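/*
 * Tear down the ARC: flush all cached buffers and destroy the state,
 * kstats, and worker threads created by arc_init().
 */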
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
while ((p = list_head(&arc_prune_list)) != NULL) {
list_remove(&arc_prune_list, p);
zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
arc_state_free_markers(arc_state_evict_markers,
arc_state_evict_marker_count);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
* buf_fini() must precede arc_state_fini() because buf_fini() may
* trigger the release of kmem magazines, which can call back to
* arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially faster read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write a "l2arc log block",
* which is an additional piece of metadata which describes what's been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are 2 linked-lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved, but that is an optimization rather
* than for correctness. The log block also includes a pointer to the
* previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
* As can be seen in the diagram, rather than using a simple linked list,
* we use a pair of linked lists with alternating elements. This is a
* performance enhancement: with a single list we would only learn the
* address of the next log block once the current block had been
* completely read in, keeping the device's I/O queue only one operation
* deep and incurring a large amount of I/O round-trip latency. Having
* two lists allows us to fetch two log blocks ahead of where we are
* currently rebuilding L2ARC buffers.
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and simply writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
* construction, we cannot update blocks which we've already written to amend
* them to remove buffers which were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA together
* with the birth TXG uniquely identifies a block in space and time - once
* created, a block is immutable on disk. The worst we can do is waste some
* time and memory during L2ARC rebuild reconstructing outdated ARC entries
* that will get dropped from the L2ARC as it is being updated with new
* blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
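/*
 * Illustrative walk of the two-list scheme above (a sketch; the block
 * names A0/A1/B0/B1 are hypothetical, not identifiers in the code):
 *
 *	chain A: A0 -> A1 -> A2 -> ...   headed by dh_start_lbps[0]
 *	chain B: B0 -> B1 -> B2 -> ...   headed by dh_start_lbps[1]
 *
 * The traversal order at rebuild time is A0, B0, A1, B1, A2, ...  While
 * A0 is being read and decoded, the read for B0 can already be in
 * flight because B0's address came from the device header rather than
 * from A0 itself; once A0 is decoded, A1's address is known (each log
 * block stores a pointer to the previous block in its chain), so A1 can
 * be prefetched while B0 is being processed, and so on.
 */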
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
uint64_t size, dev_size, tsize;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
"be greater than zero, resetting it to the default (%d)",
L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
/*
* Make sure the write size does not exceed the size of the cache
* device. This is important in l2arc_evict(); otherwise infinite
* iteration can occur.
*/
dev_size = dev->l2ad_end - dev->l2ad_start;
tsize = size + l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0)
tsize += MAX(64 * 1024 * 1024,
(tsize * l2arc_trim_ahead) / 100);
if (tsize >= dev_size) {
cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost "
"plus the overhead of log blocks (persistent L2ARC, "
"%llu bytes) exceeds the size of the cache device "
"(guid %llu), resetting them to the default (%d)",
l2arc_log_blk_overhead(size, dev),
dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE;
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
}
return (size);
}
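/*
 * Worked example of the sizing above (a sketch assuming the usual
 * defaults of l2arc_write_max == l2arc_write_boost == 8MB and a cold
 * ARC, i.e. arc_warm == B_FALSE; the device size is hypothetical):
 *
 *	size   = 8MB                    l2arc_write_max
 *	size  += 8MB                    boost while the ARC is cold
 *	tsize  = 16MB + l2arc_log_blk_overhead(16MB, dev)
 *	tsize += MAX(64MB, tsize * l2arc_trim_ahead / 100)   if trimming
 *
 * Only if tsize reaches the usable device size (l2ad_end - l2ad_start)
 * are the tunables reset to L2ARC_WRITE_SIZE; on any reasonably sized
 * cache device the value returned here is simply 16MB.
 */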
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
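/*
 * Example of the pacing above (assuming the default tunables
 * l2arc_feed_secs == 1 and l2arc_feed_min_ms == 200):
 *
 *	wrote >  wanted / 2  ->  next feed ~200ms after this one began
 *	wrote <= wanted / 2  ->  next feed ~1s after this one began
 *
 * The MAX(now, ...) clamp only keeps us from scheduling the next feed
 * in the past when the current pass itself took longer than the
 * interval.
 */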
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all);
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all)
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
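/*
 * Example of the rotation above (device names are hypothetical): with
 * cache devices c1, c2 and c3 attached, successive feed passes select
 * c1, c2, c3, c1, ... skipping any device that is faulted, still
 * rebuilding, or waiting for a whole-device TRIM. If every device is in
 * one of those states, NULL is returned and the feed pass is skipped.
 */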
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
list_t *buflist;
l2arc_data_free_t *df, *df_prev;
mutex_enter(&l2arc_free_on_write_mtx);
buflist = l2arc_free_on_write;
for (df = list_tail(buflist); df; df = df_prev) {
df_prev = list_prev(buflist, df);
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
list_remove(buflist, df);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
} else {
bzero(&l2dhdr->dh_start_lbps[i],
sizeof (l2arc_log_blkptr_t));
}
break;
}
bcopy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[i],
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because the zio is either re-used for
* an I/O of the block in the case of an error, or it is
* passed to arc_read_done(), which needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns the locked sublist; the caller
* is responsible for unlocking it with multilist_sublist_unlock().
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
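/*
 * Illustration of the resulting feed order (pass numbers as used by
 * l2arc_write_buffers()):
 *
 *	pass 0: MFU metadata        pass 1: MRU metadata
 *	pass 2: MFU data            pass 3: MRU data
 *
 * With l2arc_mfuonly set, passes 1 and 3 are skipped, so only MFU
 * buffers are considered for caching.
 */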
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
*/
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
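/*
 * Worked example of the estimate above (a sketch; the 8MB write size
 * and 1022-entry log blocks are simply the common defaults):
 *
 *	log_entries = 8MB >> SPA_MINBLOCKSHIFT = 16384
 *	log_blocks  = (16384 + 1022 - 1) / 1022 = 17
 *	overhead    = 17 * asize(sizeof (l2arc_log_blk_phys_t))
 *
 * The estimate deliberately assumes the worst case of one log entry per
 * SPA_MINBLOCKSIZE of payload, so it is an upper bound on the real
 * overhead.
 */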
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, or it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
buflist = &dev->l2ad_buflist;
/*
* We need to add in the worst case scenario of log block overhead.
*/
distance += l2arc_log_blk_overhead(distance, dev);
if (vd->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
* Trim ahead of the write size by 64MB or by (l2arc_trim_ahead/100)
* times the write size, whichever is greater.
*/
distance += MAX(64 * 1024 * 1024,
(distance * l2arc_trim_ahead) / 100);
}
top:
rerun = B_FALSE;
if (dev->l2ad_hand >= (dev->l2ad_end - distance)) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_range() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_ranges() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (ie
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* We need to check whether we are evicting all buffers; otherwise we
* may iterate unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <, dev->l2ad_evict);
}
}
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we simply allocate it
* and copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
cabd = abd_alloc_for_io(asize, ismd);
tmp = abd_borrow_buf(cabd, asize);
psize = zio_compress_data(compress, to_write, tmp, size,
hdr->b_complevel);
if (psize >= size) {
abd_return_buf(cabd, tmp, asize);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
to_write = cabd;
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto encrypt;
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
bzero((char *)tmp + psize, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, asize);
to_write = cabd;
}
encrypt:
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* If pass == 1 or 3, we cache MRU metadata and data
* respectively.
*/
if (l2arc_mfuonly) {
if (pass == 1 || pass == 3)
continue;
}
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
if ((write_asize + asize) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
ASSERT3U(arc_hdr_size(hdr), >, 0);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
/*
* Create a list to save allocated abd buffers
* for l2arc_log_blk_commit().
*/
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_exit(hash_lock);
/*
* Append buf info to current log and commit if full.
* arcstat_l2_{size,asize} kstats are updated
* internally.
*/
if (l2arc_log_blk_insert(dev, hdr))
l2arc_log_blk_commit(dev, pio, cb);
zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
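/*
 * Example of the limit above (assuming the default l2arc_meta_percent
 * of 33; the 16GB arc_c_max is hypothetical): with a cold ARC, feeding
 * and rebuilding are skipped once L2ARC-only headers exceed roughly
 * 5.3GB of RAM, or 3/4 of arc_meta_limit, or whenever
 * arc_reclaim_needed() reports memory pressure - whichever trips first.
 */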
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
-/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
+ (void) unused;
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
spa_t *spa = dev->l2ad_spa;
/*
* The L2ARC has to hold at least the payload of one log block for
* them to be restored (persistent L2ARC). The payload of a log block
* depends on the amount of its log entries. We always write log blocks
* with 1022 entries. How many of them are committed or restored depends
* on the size of the L2ARC device. Thus the maximum payload of
* one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
* is less than that, we reduce the amount of committed and restored
* log entries per block so as to enable persistence.
*/
if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
dev->l2ad_log_entries = 0;
} else {
dev->l2ad_log_entries = MIN((dev->l2ad_end -
dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
L2ARC_LOG_BLK_MAX_ENTRIES);
}
/*
* Read the device header; if an error is returned, do not rebuild L2ARC.
*/
if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
/*
* If we are onlining a cache device (vdev_reopen) that was
* still present (l2arc_vdev_present()) and rebuild is enabled,
* we should evict all ARC buffers and pointers to log blocks
* and reclaim their space before restoring its contents to
* L2ARC.
*/
if (reopen) {
if (!l2arc_rebuild_enabled) {
return;
} else {
l2arc_evict(dev, 0, B_TRUE);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
}
/*
* Just mark the device as pending for a rebuild. We won't
* be starting a rebuild in line here as it would block pool
* import. Instead spa_load_impl will hand that off to an
* async task which will call l2arc_spa_rebuild_start.
*/
dev->l2ad_rebuild = B_TRUE;
} else if (spa_writeable(spa)) {
/*
* In this case TRIM the whole device if l2arc_trim_ahead > 0,
* otherwise create a new header. We zero out the memory holding
* the header to reset dh_start_lbps. If we TRIM the whole
* device the new header will be written by
* vdev_trim_l2arc_thread() at the end of the TRIM to update the
* trim_state in the header too. When reading the header, if
* trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
* we opt to TRIM the whole device again.
*/
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
bzero(l2dhdr, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
}
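/*
 * Example of the log-entry scaling above (device sizes are
 * hypothetical): a cache device with ~4GB of usable space gets
 * (4GB >> SPA_MAXBLOCKSHIFT) = 256 entries per log block, while 16GB or
 * more allows the full 1022 entries. A device smaller than
 * l2arc_rebuild_blocks_min_l2size gets l2ad_log_entries == 0, which
 * effectively disables persistence for it.
 */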
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
/*
* Decide if dev is eligible for L2ARC rebuild or whole device
* trimming. This has to happen before the device is added in the
* cache device list and l2arc_dev_mtx is released. Otherwise
* l2arc_feed_thread() might already start writing on the
* device.
*/
l2arc_rebuild_dev(adddev, B_FALSE);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
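/*
 * Resulting on-device layout from the setup above (not to scale; the
 * device header occupies at least one ashift-sized block):
 *
 *	+--------------+---------+------------------------------------+
 *	| labels/boot  | dev hdr |  rotating data buffers + log blks  |
 *	+--------------+---------+------------------------------------+
 *	0              ^VDEV_LABEL_START_SIZE    l2ad_start ... l2ad_end
 *
 * On a freshly added device l2ad_hand == l2ad_evict == l2ad_start.
 */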
/*
* Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
* in case of onlining a cache device.
*/
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
/*
* In contrast to l2arc_add_vdev() we do not have to worry about
* l2arc_feed_thread() invalidating previous content when onlining a
* cache device. The device parameters (l2ad*) are not cleared when
* offlining the device and writing new buffers will not invalidate
* all previous content. In worst case only buffers that have not had
* their log block written to the device will be lost.
* When onlining the cache device (ie offline->online without exporting
* the pool in between) this happens:
* vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
* | |
* vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
* During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
* is set to B_TRUE we might write additional buffers to the device.
*/
l2arc_rebuild_dev(dev, reopen);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *remdev = NULL;
/*
* Find the device by vdev
*/
remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
if (remdev->l2ad_rebuild_began == B_TRUE) {
remdev->l2ad_rebuild_cancel = B_TRUE;
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
/*
* Remove device from global list
*/
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(!dev->l2ad_rebuild_cancel);
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It:
* starts reading the log block chain and restores each block's contents
* to memory (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
bcopy(l2dhdr->dh_start_lbps, lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that we know that the next_lb checks out alright, we
* can start reconstruction from this log block.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* log block restored, include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
bcopy(&lbps[0], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
cond_resched();
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* L2ARC config lock held by somebody in writer,
* possibly due to them trying to remove us. They'll
* likely want us to shut down, so after a little
* delay, we check l2ad_rebuild_cancel and retry
* the lock again.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
/*
* Attempts to read the device header on the provided L2ARC device and writes
* it to `hdr'. On success, this function returns 0, otherwise the appropriate
* error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* Attempt to rebuild a device containing no actual dev hdr
* or containing a header from some other pool or from another
* version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lp and next_lp point to the current and next log block
* address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC blk.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
abd_t *abd = NULL;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
if ((err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
if (abd != NULL)
abd_free(abd);
return (err);
}
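/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): one way a caller can drive the `this_io'/`next_io' protocol
 * documented above l2arc_log_blk_read(). Allocation of this_lb/next_lb,
 * initialization of lbps[] from the device header (dh_start_lbps[]), and
 * final cleanup of a leftover fetch are omitted; the variable names are
 * assumptions following the conventions of the comment above, not the
 * real l2arc_rebuild() implementation.
 */
#if 0
	l2arc_log_blkptr_t lbps[2];		/* current and next block ptrs */
	l2arc_log_blk_phys_t *this_lb, *next_lb; /* pre-allocated buffers */
	zio_t *this_io = NULL, *next_io = NULL;
	int err;

	while (l2arc_log_blkptr_valid(dev, &lbps[0])) {
		/* First pass: this_io == NULL, so the sync read is issued. */
		err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
		    this_lb, next_lb, this_io, &next_io);
		if (err != 0)
			break;	/* any in-flight fetch was already aborted */

		/* ... restore this_lb's entries into ARC here ... */

		/*
		 * The prefetch (if any) read into next_lb, so swap the
		 * buffers and hand the fetched zio back in as this_io.
		 */
		l2arc_log_blk_phys_t *tmp = this_lb;
		this_lb = next_lb;
		next_lb = tmp;
		this_io = next_io;
		next_io = NULL;
		lbps[0] = lbps[1];
		lbps[1] = this_lb->lb_prev_lbp;	/* walk backwards in time */
	}
#endif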
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
 * since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE, arc_l2c_only);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize;
/*
 * Do all the allocation before grabbing any locks; this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
 * L2ARC metadata. If not, fill it in and update the flag.
 * This is important in case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
 * The returned zio will contain newly allocated memory buffers for the IO
 * data, which should then be freed by the caller once the zio is no longer
 * needed (i.e. once it has completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
static void
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
uint8_t *tmpbuf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
tmpbuf = zio_buf_alloc(sizeof (*lb));
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* try to compress the buffer */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, tmpbuf, sizeof (*lb), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
_NOTE(CONSTCOND)
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
bzero(tmpbuf + psize, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
bcopy(lb, tmpbuf, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
fletcher_4_native(tmpbuf, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
bcopy(&l2dhdr->dh_start_lbps[0], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
bzero(le, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
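/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): the intended caller pattern for the insert/commit pair above.
 * In the write path, each buffer committed to L2ARC is also appended to
 * the open log block; once the block reports itself full, it is written
 * out as part of the same parent zio. `dev', `hdr', `pio' and `cb' are
 * assumed write-path locals.
 */
#if 0
	if (l2arc_log_blk_insert(dev, hdr))
		l2arc_log_blk_commit(dev, pio, cb);
#endif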
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
 * just do a range comparison; we need to handle the situation in which the
* range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine if it sits in
* between the top and bottom.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
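/*
 * Worked example (added by the editor for illustration): suppose the
 * region of interest spans addresses 0..1000. With bottom = 900 and
 * top = 100 (the looped-around case), check = 950 and check = 50 both
 * report overlap, while check = 500 does not. With bottom = 100 and
 * top = 900 (the sequential case), only 100 <= check <= 900 overlaps.
 */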
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
param_get_long, ZMOD_RW, "Min arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
param_get_long, ZMOD_RW, "Max arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Metadata limit for arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of arc size for arc meta limit");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long,
param_get_long, ZMOD_RW, "Min arc metadata");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW,
"Meta objects to scan for prune");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW,
"Limit number of restarts in arc_evict_meta");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW,
"Meta reclaim strategy");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_int, ZMOD_RW, "Seconds before growing arc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW,
"Disable arc_p adapt dampener");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "log2(fraction of arc to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim arc to");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed arc buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_int, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_int, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, ULONG, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, ULONG, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, ULONG, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, ULONG, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, ULONG, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, ULONG, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, ULONG, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long,
param_get_long, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Minimum bytes of dnodes in arc");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
+
+ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
+ "Number of arc_prune threads");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/bpobj.c b/sys/contrib/openzfs/module/zfs/bpobj.c
index e75ba5cccde6..68f534c6b197 100644
--- a/sys/contrib/openzfs/module/zfs/bpobj.c
+++ b/sys/contrib/openzfs/module/zfs/bpobj.c
@@ -1,943 +1,943 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>
/*
* Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
*/
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(os);
dsl_pool_t *dp = dmu_objset_pool(os);
if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
ASSERT0(dp->dp_empty_bpobj);
dp->dp_empty_bpobj =
bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(os,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj, tx) == 0);
}
spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
ASSERT(dp->dp_empty_bpobj != 0);
return (dp->dp_empty_bpobj);
} else {
return (bpobj_alloc(os, blocksize, tx));
}
}
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_objset_pool(os);
spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
if (!spa_feature_is_active(dmu_objset_spa(os),
SPA_FEATURE_EMPTY_BPOBJ)) {
VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, tx));
VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
dp->dp_empty_bpobj = 0;
}
}
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
int size;
if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
size = BPOBJ_SIZE_V0;
else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
size = BPOBJ_SIZE_V1;
else if (!spa_feature_is_active(dmu_objset_spa(os),
SPA_FEATURE_LIVELIST))
size = BPOBJ_SIZE_V2;
else
size = sizeof (bpobj_phys_t);
return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
DMU_OT_BPOBJ_HDR, size, tx));
}
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
int64_t i;
bpobj_t bpo;
dmu_object_info_t doi;
int epb;
dmu_buf_t *dbuf = NULL;
ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));
mutex_enter(&bpo.bpo_lock);
if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
goto out;
VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
epb = doi.doi_data_block_size / sizeof (uint64_t);
for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
uint64_t *objarray;
uint64_t offset, blkoff;
offset = i * sizeof (uint64_t);
blkoff = P2PHASE(i, epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
VERIFY3U(0, ==, dmu_buf_hold(os,
bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
objarray = dbuf->db_data;
bpobj_free(os, objarray[blkoff], tx);
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));
out:
mutex_exit(&bpo.bpo_lock);
bpobj_close(&bpo);
VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
dmu_object_info_t doi;
int err;
err = dmu_object_info(os, object, &doi);
if (err)
return (err);
bzero(bpo, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
ASSERT(bpo->bpo_dbuf == NULL);
ASSERT(bpo->bpo_phys == NULL);
ASSERT(object != 0);
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
if (err)
return (err);
bpo->bpo_os = os;
bpo->bpo_object = object;
bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
bpo->bpo_havefreed = (doi.doi_bonus_size > BPOBJ_SIZE_V2);
bpo->bpo_phys = bpo->bpo_dbuf->db_data;
return (0);
}
boolean_t
bpobj_is_open(const bpobj_t *bpo)
{
return (bpo->bpo_object != 0);
}
void
bpobj_close(bpobj_t *bpo)
{
/* Lame workaround for closing a bpobj that was never opened. */
if (bpo->bpo_object == 0)
return;
dmu_buf_rele(bpo->bpo_dbuf, bpo);
if (bpo->bpo_cached_dbuf != NULL)
dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
bpo->bpo_dbuf = NULL;
bpo->bpo_phys = NULL;
bpo->bpo_cached_dbuf = NULL;
bpo->bpo_object = 0;
mutex_destroy(&bpo->bpo_lock);
}
static boolean_t
bpobj_is_empty_impl(bpobj_t *bpo)
{
ASSERT(MUTEX_HELD(&bpo->bpo_lock));
return (bpo->bpo_phys->bpo_num_blkptrs == 0 &&
(!bpo->bpo_havesubobj || bpo->bpo_phys->bpo_num_subobjs == 0));
}
boolean_t
bpobj_is_empty(bpobj_t *bpo)
{
mutex_enter(&bpo->bpo_lock);
boolean_t is_empty = bpobj_is_empty_impl(bpo);
mutex_exit(&bpo->bpo_lock);
return (is_empty);
}
/*
* A recursive iteration of the bpobjs would be nice here but we run the risk
* of overflowing function stack space. Instead, find each subobj and add it
 * to the head of our list so it can be scanned for subobjs. Like a
 * recursive implementation, the "deepest" subobjs will be freed first.
 * When a subobj is found to have no additional subobjs, free it.
*/
typedef struct bpobj_info {
bpobj_t *bpi_bpo;
/*
* This object is a subobj of bpi_parent,
* at bpi_index in its subobj array.
*/
struct bpobj_info *bpi_parent;
uint64_t bpi_index;
/* How many of our subobj's are left to process. */
uint64_t bpi_unprocessed_subobjs;
/* True after having visited this bpo's directly referenced BPs. */
boolean_t bpi_visited;
list_node_t bpi_node;
} bpobj_info_t;
static bpobj_info_t *
bpi_alloc(bpobj_t *bpo, bpobj_info_t *parent, uint64_t index)
{
bpobj_info_t *bpi = kmem_zalloc(sizeof (bpobj_info_t), KM_SLEEP);
bpi->bpi_bpo = bpo;
bpi->bpi_parent = parent;
bpi->bpi_index = index;
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
bpi->bpi_unprocessed_subobjs = bpo->bpo_phys->bpo_num_subobjs;
}
return (bpi);
}
/*
* Update bpobj and all of its parents with new space accounting.
*/
static void
propagate_space_reduction(bpobj_info_t *bpi, int64_t freed,
int64_t comp_freed, int64_t uncomp_freed, dmu_tx_t *tx)
{
for (; bpi != NULL; bpi = bpi->bpi_parent) {
bpobj_t *p = bpi->bpi_bpo;
ASSERT(dmu_buf_is_dirty(p->bpo_dbuf, tx));
p->bpo_phys->bpo_bytes -= freed;
ASSERT3S(p->bpo_phys->bpo_bytes, >=, 0);
if (p->bpo_havecomp) {
p->bpo_phys->bpo_comp -= comp_freed;
p->bpo_phys->bpo_uncomp -= uncomp_freed;
}
}
}
static int
bpobj_iterate_blkptrs(bpobj_info_t *bpi, bpobj_itor_t func, void *arg,
int64_t start, dmu_tx_t *tx, boolean_t free)
{
int err = 0;
int64_t freed = 0, comp_freed = 0, uncomp_freed = 0;
dmu_buf_t *dbuf = NULL;
bpobj_t *bpo = bpi->bpi_bpo;
for (int64_t i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= start; i--) {
uint64_t offset = i * sizeof (blkptr_t);
uint64_t blkoff = P2PHASE(i, bpo->bpo_epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
offset, FTAG, &dbuf, 0);
if (err)
break;
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
blkptr_t *bparray = dbuf->db_data;
blkptr_t *bp = &bparray[blkoff];
boolean_t bp_freed = BP_GET_FREE(bp);
err = func(arg, bp, bp_freed, tx);
if (err)
break;
if (free) {
int sign = bp_freed ? -1 : +1;
spa_t *spa = dmu_objset_spa(bpo->bpo_os);
freed += sign * bp_get_dsize_sync(spa, bp);
comp_freed += sign * BP_GET_PSIZE(bp);
uncomp_freed += sign * BP_GET_UCSIZE(bp);
ASSERT(dmu_buf_is_dirty(bpo->bpo_dbuf, tx));
bpo->bpo_phys->bpo_num_blkptrs--;
ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
if (bp_freed) {
ASSERT(bpo->bpo_havefreed);
bpo->bpo_phys->bpo_num_freed--;
ASSERT3S(bpo->bpo_phys->bpo_num_freed, >=, 0);
}
}
}
if (free) {
propagate_space_reduction(bpi, freed, comp_freed,
uncomp_freed, tx);
VERIFY0(dmu_free_range(bpo->bpo_os,
bpo->bpo_object,
bpo->bpo_phys->bpo_num_blkptrs * sizeof (blkptr_t),
DMU_OBJECT_END, tx));
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
return (err);
}
/*
* Given an initial bpo, start by freeing the BPs that are directly referenced
* by that bpo. If the bpo has subobjs, read in its last subobj and push the
* subobj to our stack. By popping items off our stack, eventually we will
* encounter a bpo that has no subobjs. We can free its bpobj_info_t, and if
* requested also free the now-empty bpo from disk and decrement
* its parent's subobj count. We continue popping each subobj from our stack,
* visiting its last subobj until they too have no more subobjs, and so on.
*/
static int
bpobj_iterate_impl(bpobj_t *initial_bpo, bpobj_itor_t func, void *arg,
dmu_tx_t *tx, boolean_t free, uint64_t *bpobj_size)
{
list_t stack;
bpobj_info_t *bpi;
int err = 0;
/*
* Create a "stack" for us to work with without worrying about
* stack overflows. Initialize it with the initial_bpo.
*/
list_create(&stack, sizeof (bpobj_info_t),
offsetof(bpobj_info_t, bpi_node));
mutex_enter(&initial_bpo->bpo_lock);
if (bpobj_size != NULL)
*bpobj_size = initial_bpo->bpo_phys->bpo_num_blkptrs;
list_insert_head(&stack, bpi_alloc(initial_bpo, NULL, 0));
while ((bpi = list_head(&stack)) != NULL) {
bpobj_t *bpo = bpi->bpi_bpo;
ASSERT3P(bpo, !=, NULL);
ASSERT(MUTEX_HELD(&bpo->bpo_lock));
ASSERT(bpobj_is_open(bpo));
if (free)
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
if (bpi->bpi_visited == B_FALSE) {
err = bpobj_iterate_blkptrs(bpi, func, arg, 0, tx,
free);
bpi->bpi_visited = B_TRUE;
if (err != 0)
break;
}
/*
* We've finished with this bpo's directly-referenced BP's and
* it has no more unprocessed subobjs. We can free its
* bpobj_info_t (unless it is the topmost, initial_bpo).
* If we are freeing from disk, we can also do that.
*/
if (bpi->bpi_unprocessed_subobjs == 0) {
/*
* If there are no entries, there should
* be no bytes.
*/
if (bpobj_is_empty_impl(bpo)) {
ASSERT0(bpo->bpo_phys->bpo_bytes);
ASSERT0(bpo->bpo_phys->bpo_comp);
ASSERT0(bpo->bpo_phys->bpo_uncomp);
}
/* The initial_bpo has no parent and is not closed. */
if (bpi->bpi_parent != NULL) {
if (free) {
bpobj_t *p = bpi->bpi_parent->bpi_bpo;
ASSERT0(bpo->bpo_phys->bpo_num_blkptrs);
ASSERT3U(p->bpo_phys->bpo_num_subobjs,
>, 0);
ASSERT3U(bpi->bpi_index, ==,
p->bpo_phys->bpo_num_subobjs - 1);
ASSERT(dmu_buf_is_dirty(bpo->bpo_dbuf,
tx));
p->bpo_phys->bpo_num_subobjs--;
VERIFY0(dmu_free_range(p->bpo_os,
p->bpo_phys->bpo_subobjs,
bpi->bpi_index * sizeof (uint64_t),
sizeof (uint64_t), tx));
/* eliminate the empty subobj list */
if (bpo->bpo_havesubobj &&
bpo->bpo_phys->bpo_subobjs != 0) {
ASSERT0(bpo->bpo_phys->
bpo_num_subobjs);
err = dmu_object_free(
bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
tx);
if (err)
break;
bpo->bpo_phys->bpo_subobjs = 0;
}
err = dmu_object_free(p->bpo_os,
bpo->bpo_object, tx);
if (err)
break;
}
mutex_exit(&bpo->bpo_lock);
bpobj_close(bpo);
kmem_free(bpo, sizeof (bpobj_t));
} else {
mutex_exit(&bpo->bpo_lock);
}
/*
* Finished processing this bpo. Unlock, and free
* our "stack" info.
*/
list_remove_head(&stack);
kmem_free(bpi, sizeof (bpobj_info_t));
} else {
/*
* We have unprocessed subobjs. Process the next one.
*/
ASSERT(bpo->bpo_havecomp);
ASSERT3P(bpobj_size, ==, NULL);
/* Add the last subobj to stack. */
int64_t i = bpi->bpi_unprocessed_subobjs - 1;
uint64_t offset = i * sizeof (uint64_t);
uint64_t obj_from_sublist;
err = dmu_read(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
offset, sizeof (uint64_t), &obj_from_sublist,
DMU_READ_PREFETCH);
if (err)
break;
bpobj_t *sublist = kmem_alloc(sizeof (bpobj_t),
KM_SLEEP);
err = bpobj_open(sublist, bpo->bpo_os,
obj_from_sublist);
if (err)
break;
list_insert_head(&stack, bpi_alloc(sublist, bpi, i));
mutex_enter(&sublist->bpo_lock);
bpi->bpi_unprocessed_subobjs--;
}
}
/*
 * Clean up anything left on the "stack" after we leave the loop.
* Every bpo on the stack is locked so we must remember to undo
* that now (in LIFO order).
*/
while ((bpi = list_remove_head(&stack)) != NULL) {
bpobj_t *bpo = bpi->bpi_bpo;
ASSERT(err != 0);
ASSERT3P(bpo, !=, NULL);
mutex_exit(&bpo->bpo_lock);
/* do not free the initial_bpo */
if (bpi->bpi_parent != NULL) {
bpobj_close(bpi->bpi_bpo);
kmem_free(bpi->bpi_bpo, sizeof (bpobj_t));
}
kmem_free(bpi, sizeof (bpobj_info_t));
}
list_destroy(&stack);
return (err);
}
/*
* Iterate and remove the entries. If func returns nonzero, iteration
* will stop and that entry will not be removed.
*/
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE, NULL));
}
/*
* Iterate the entries. If func returns nonzero, iteration will stop.
*
* If there are no subobjs:
*
* *bpobj_size can be used to return the number of block pointers in the
* bpobj. Note that this may be different from the number of block pointers
* that are iterated over, if iteration is terminated early (e.g. by the func
* returning nonzero).
*
* If there are concurrent (or subsequent) modifications to the bpobj then the
* returned *bpobj_size can be passed as "start" to
* livelist_bpobj_iterate_from_nofree() to iterate the newly added entries.
*/
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg,
uint64_t *bpobj_size)
{
return (bpobj_iterate_impl(bpo, func, arg, NULL, B_FALSE, bpobj_size));
}
/*
* Iterate over the blkptrs in the bpobj beginning at index start. If func
* returns nonzero, iteration will stop. This is a livelist specific function
* since it assumes that there are no subobjs present.
*/
int
livelist_bpobj_iterate_from_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg,
int64_t start)
{
if (bpo->bpo_havesubobj)
VERIFY0(bpo->bpo_phys->bpo_subobjs);
bpobj_info_t *bpi = bpi_alloc(bpo, NULL, 0);
int err = bpobj_iterate_blkptrs(bpi, func, arg, start, NULL, B_FALSE);
kmem_free(bpi, sizeof (bpobj_info_t));
return (err);
}
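/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): the count-then-resume pattern described above
 * bpobj_iterate_nofree(). It assumes a livelist-style bpobj with no
 * subobjs; the callback and function names below are hypothetical.
 */
#if 0
static int
bpobj_count_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
	(void) bp, (void) bp_freed, (void) tx;
	(*(uint64_t *)arg)++;
	return (0);
}

static void
bpobj_count_then_resume(bpobj_t *bpo)
{
	uint64_t seen = 0, size_at_start = 0;

	/* First pass: visit every BP present now and remember the count. */
	VERIFY0(bpobj_iterate_nofree(bpo, bpobj_count_cb, &seen,
	    &size_at_start));

	/* ... later syncs may append more BPs to the bpobj ... */

	/* Second pass: visit only the BPs appended after the first pass. */
	VERIFY0(livelist_bpobj_iterate_from_nofree(bpo, bpobj_count_cb, &seen,
	    (int64_t)size_at_start));
}
#endif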
/*
* Logically add subobj's contents to the parent bpobj.
*
* In the most general case, this is accomplished in constant time by adding
* a reference to subobj. This case is used when enqueuing a large subobj:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+
* | bp | bp | bp | bp | bp | | obj | obj | obj |
* +----+----+----+----+----+ +-----+-----+-----+
*
* +--------------+ +--------------+
* | sub-bpobj |----------------------> | subsubobj |
* +----+----+----+----+---------+----+ +-----+-----+--+--------+-----+
* | bp | bp | bp | bp | ... | bp | | obj | obj | ... | obj |
* +----+----+----+----+---------+----+ +-----+-----+-----------+-----+
*
* Result: sub-bpobj added to parent's subobj list.
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+-----+
* | bp | bp | bp | bp | bp | | obj | obj | obj | OBJ |
* +----+----+----+----+----+ +-----+-----+-----+--|--+
* |
* /-----------------------------------------------------/
* v
* +--------------+ +--------------+
* | sub-bpobj |----------------------> | subsubobj |
* +----+----+----+----+---------+----+ +-----+-----+--+--------+-----+
* | bp | bp | bp | bp | ... | bp | | obj | obj | ... | obj |
* +----+----+----+----+---------+----+ +-----+-----+-----------+-----+
*
*
* In a common case, the subobj is small: its bp's and its list of subobj's
* are each stored in a single block. In this case we copy the subobj's
* contents to the parent:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+
* | bp | bp | bp | bp | bp | | obj | obj | obj |
* +----+----+----+----+----+ +-----+-----+-----+
* ^ ^
* +--------------+ | +--------------+ |
* | sub-bpobj |---------^------------> | subsubobj | ^
* +----+----+----+ | +-----+-----+--+ |
* | BP | BP |-->-->-->-->-/ | OBJ | OBJ |-->-/
* +----+----+ +-----+-----+
*
* Result: subobj destroyed, contents copied to parent:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+----+----+ +-----+-----+--+--+-----+-----+
* | bp | bp | bp | bp | bp | BP | BP | | obj | obj | obj | OBJ | OBJ |
* +----+----+----+----+----+----+----+ +-----+-----+-----+-----+-----+
*
*
* If the subobj has many BP's but few subobj's, we can copy the sub-subobj's
* but retain the sub-bpobj:
* +--------------+ +--------------+
* | bpobj |----------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+
* | bp | bp | bp | bp | bp | | obj | obj | obj |
* +----+----+----+----+----+ +-----+-----+-----+
* ^
* +--------------+ +--------------+ |
* | sub-bpobj |----------------------> | subsubobj | ^
* +----+----+----+----+---------+----+ +-----+-----+--+ |
* | bp | bp | bp | bp | ... | bp | | OBJ | OBJ |-->-/
* +----+----+----+----+---------+----+ +-----+-----+
*
* Result: sub-sub-bpobjs and subobj added to parent's subobj list.
* +--------------+ +--------------+
* | bpobj |-------------------->| subobj list |
* +----+----+----+----+----+ +-----+-----+--+--+-----+-----+------+
* | bp | bp | bp | bp | bp | | obj | obj | obj | OBJ | OBJ | OBJ* |
* +----+----+----+----+----+ +-----+-----+-----+-----+-----+--|---+
* |
* /--------------------------------------------------------------/
* v
* +--------------+
* | sub-bpobj |
* +----+----+----+----+---------+----+
* | bp | bp | bp | bp | ... | bp |
* +----+----+----+----+---------+----+
*/
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
bpobj_t subbpo;
uint64_t used, comp, uncomp, subsubobjs;
boolean_t copy_subsub = B_TRUE;
boolean_t copy_bps = B_TRUE;
ASSERT(bpobj_is_open(bpo));
ASSERT(subobj != 0);
ASSERT(bpo->bpo_havesubobj);
ASSERT(bpo->bpo_havecomp);
ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);
if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
bpobj_decr_empty(bpo->bpo_os, tx);
return;
}
VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));
if (bpobj_is_empty(&subbpo)) {
/* No point in having an empty subobj. */
bpobj_close(&subbpo);
bpobj_free(bpo->bpo_os, subobj, tx);
return;
}
mutex_enter(&bpo->bpo_lock);
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
dmu_object_info_t doi;
if (bpo->bpo_phys->bpo_subobjs != 0) {
ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
&doi));
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
}
/*
* If subobj has only one block of subobjs, then move subobj's
* subobjs to bpo's subobj list directly. This reduces recursion in
* bpobj_iterate due to nested subobjs.
*/
subsubobjs = subbpo.bpo_phys->bpo_subobjs;
if (subsubobjs != 0) {
VERIFY0(dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
if (doi.doi_max_offset > doi.doi_data_block_size) {
copy_subsub = B_FALSE;
}
}
/*
* If, in addition to having only one block of subobj's, subobj has
* only one block of bp's, then move subobj's bp's to bpo's bp list
* directly. This reduces recursion in bpobj_iterate due to nested
* subobjs.
*/
VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subobj, &doi));
if (doi.doi_max_offset > doi.doi_data_block_size || !copy_subsub) {
copy_bps = B_FALSE;
}
if (copy_subsub && subsubobjs != 0) {
dmu_buf_t *subdb;
uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;
VERIFY0(dmu_buf_hold(bpo->bpo_os, subsubobjs,
0, FTAG, &subdb, 0));
/*
* Make sure that we are not asking dmu_write()
* to write more data than we have in our buffer.
*/
VERIFY3U(subdb->db_size, >=,
numsubsub * sizeof (subobj));
if (bpo->bpo_phys->bpo_subobjs == 0) {
bpo->bpo_phys->bpo_subobjs =
dmu_object_alloc(bpo->bpo_os,
DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
DMU_OT_NONE, 0, tx);
}
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
numsubsub * sizeof (subobj), subdb->db_data, tx);
dmu_buf_rele(subdb, FTAG);
bpo->bpo_phys->bpo_num_subobjs += numsubsub;
dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
subbpo.bpo_phys->bpo_subobjs = 0;
VERIFY0(dmu_object_free(bpo->bpo_os, subsubobjs, tx));
}
if (copy_bps) {
dmu_buf_t *bps;
uint64_t numbps = subbpo.bpo_phys->bpo_num_blkptrs;
ASSERT(copy_subsub);
VERIFY0(dmu_buf_hold(bpo->bpo_os, subobj,
0, FTAG, &bps, 0));
/*
* Make sure that we are not asking dmu_write()
* to write more data than we have in our buffer.
*/
VERIFY3U(bps->db_size, >=, numbps * sizeof (blkptr_t));
dmu_write(bpo->bpo_os, bpo->bpo_object,
bpo->bpo_phys->bpo_num_blkptrs * sizeof (blkptr_t),
numbps * sizeof (blkptr_t),
bps->db_data, tx);
dmu_buf_rele(bps, FTAG);
bpo->bpo_phys->bpo_num_blkptrs += numbps;
bpobj_close(&subbpo);
VERIFY0(dmu_object_free(bpo->bpo_os, subobj, tx));
} else {
bpobj_close(&subbpo);
if (bpo->bpo_phys->bpo_subobjs == 0) {
bpo->bpo_phys->bpo_subobjs =
dmu_object_alloc(bpo->bpo_os,
DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
DMU_OT_NONE, 0, tx);
}
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
sizeof (subobj), &subobj, tx);
bpo->bpo_phys->bpo_num_subobjs++;
}
bpo->bpo_phys->bpo_bytes += used;
bpo->bpo_phys->bpo_comp += comp;
bpo->bpo_phys->bpo_uncomp += uncomp;
mutex_exit(&bpo->bpo_lock);
}
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
blkptr_t stored_bp = *bp;
uint64_t offset;
int blkoff;
blkptr_t *bparray;
ASSERT(bpobj_is_open(bpo));
ASSERT(!BP_IS_HOLE(bp));
ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);
if (BP_IS_EMBEDDED(bp)) {
/*
* The bpobj will compress better without the payload.
*
* Note that we store EMBEDDED bp's because they have an
* uncompressed size, which must be accounted for. An
* alternative would be to add their size to bpo_uncomp
* without storing the bp, but that would create additional
* complications: bpo_uncomp would be inconsistent with the
* set of BP's stored, and bpobj_iterate() wouldn't visit
* all the space accounted for in the bpobj.
*/
bzero(&stored_bp, sizeof (stored_bp));
stored_bp.blk_prop = bp->blk_prop;
stored_bp.blk_birth = bp->blk_birth;
} else if (!BP_GET_DEDUP(bp)) {
/* The bpobj will compress better without the checksum */
bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
}
stored_bp.blk_fill = 0;
BP_SET_FREE(&stored_bp, bp_freed);
mutex_enter(&bpo->bpo_lock);
offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);
if (bpo->bpo_cached_dbuf == NULL ||
offset < bpo->bpo_cached_dbuf->db_offset ||
offset >= bpo->bpo_cached_dbuf->db_offset +
bpo->bpo_cached_dbuf->db_size) {
if (bpo->bpo_cached_dbuf)
dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
offset, bpo, &bpo->bpo_cached_dbuf, 0));
}
dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
bparray = bpo->bpo_cached_dbuf->db_data;
bparray[blkoff] = stored_bp;
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
bpo->bpo_phys->bpo_num_blkptrs++;
int sign = bp_freed ? -1 : +1;
bpo->bpo_phys->bpo_bytes += sign *
bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
if (bpo->bpo_havecomp) {
bpo->bpo_phys->bpo_comp += sign * BP_GET_PSIZE(bp);
bpo->bpo_phys->bpo_uncomp += sign * BP_GET_UCSIZE(bp);
}
if (bp_freed) {
ASSERT(bpo->bpo_havefreed);
bpo->bpo_phys->bpo_num_freed++;
}
mutex_exit(&bpo->bpo_lock);
}
struct space_range_arg {
spa_t *spa;
uint64_t mintxg;
uint64_t maxtxg;
uint64_t used;
uint64_t comp;
uint64_t uncomp;
};
-/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
+ (void) bp_freed, (void) tx;
struct space_range_arg *sra = arg;
if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
sra->used += bp_get_dsize_sync(sra->spa, bp);
else
sra->used += bp_get_dsize(sra->spa, bp);
sra->comp += BP_GET_PSIZE(bp);
sra->uncomp += BP_GET_UCSIZE(bp);
}
return (0);
}
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
ASSERT(bpobj_is_open(bpo));
mutex_enter(&bpo->bpo_lock);
*usedp = bpo->bpo_phys->bpo_bytes;
if (bpo->bpo_havecomp) {
*compp = bpo->bpo_phys->bpo_comp;
*uncompp = bpo->bpo_phys->bpo_uncomp;
mutex_exit(&bpo->bpo_lock);
return (0);
} else {
mutex_exit(&bpo->bpo_lock);
return (bpobj_space_range(bpo, 0, UINT64_MAX,
usedp, compp, uncompp));
}
}
/*
* Return the amount of space in the bpobj which is:
* mintxg < blk_birth <= maxtxg
*/
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
struct space_range_arg sra = { 0 };
int err;
ASSERT(bpobj_is_open(bpo));
/*
* As an optimization, if they want the whole txg range, just
* get bpo_bytes rather than iterating over the bps.
*/
if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
return (bpobj_space(bpo, usedp, compp, uncompp));
sra.spa = dmu_objset_spa(bpo->bpo_os);
sra.mintxg = mintxg;
sra.maxtxg = maxtxg;
err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
*usedp = sra.used;
*compp = sra.comp;
*uncompp = sra.uncomp;
return (err);
}
/*
* A bpobj_itor_t to append blkptrs to a bplist. Note that while blkptrs in a
 * bpobj are designated as free or allocated, that information is not preserved
* in bplists.
*/
-/* ARGSUSED */
int
bplist_append_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
+ (void) bp_freed, (void) tx;
bplist_t *bpl = arg;
bplist_append(bpl, bp);
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/bptree.c b/sys/contrib/openzfs/module/zfs/bptree.c
index 1827a3c4e326..4e9a4825e262 100644
--- a/sys/contrib/openzfs/module/zfs/bptree.c
+++ b/sys/contrib/openzfs/module/zfs/bptree.c
@@ -1,303 +1,303 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
*/
#include <sys/arc.h>
#include <sys/bptree.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
/*
* A bptree is a queue of root block pointers from destroyed datasets. When a
 * dataset is destroyed, its root block pointer is put on the end of the pool's
* bptree queue so the dataset's blocks can be freed asynchronously by
* dsl_scan_sync. This allows the delete operation to finish without traversing
* all the dataset's blocks.
*
* Note that while bt_begin and bt_end are only ever incremented in this code,
* they are effectively reset to 0 every time the entire bptree is freed because
* the bptree's object is destroyed and re-created.
*/
struct bptree_args {
bptree_phys_t *ba_phys; /* data in bonus buffer, dirtied if freeing */
boolean_t ba_free; /* true if freeing during traversal */
bptree_itor_t *ba_func; /* function to call for each blockpointer */
void *ba_arg; /* caller supplied argument to ba_func */
dmu_tx_t *ba_tx; /* caller supplied tx, NULL if not freeing */
} bptree_args_t;
uint64_t
bptree_alloc(objset_t *os, dmu_tx_t *tx)
{
uint64_t obj;
dmu_buf_t *db;
bptree_phys_t *bt;
obj = dmu_object_alloc(os, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OTN_UINT64_METADATA,
sizeof (bptree_phys_t), tx);
/*
* Bonus buffer contents are already initialized to 0, but for
* readability we make it explicit.
*/
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
bt = db->db_data;
bt->bt_begin = 0;
bt->bt_end = 0;
bt->bt_bytes = 0;
bt->bt_comp = 0;
bt->bt_uncomp = 0;
dmu_buf_rele(db, FTAG);
return (obj);
}
int
bptree_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
dmu_buf_t *db;
bptree_phys_t *bt;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
ASSERT3U(bt->bt_begin, ==, bt->bt_end);
ASSERT0(bt->bt_bytes);
ASSERT0(bt->bt_comp);
ASSERT0(bt->bt_uncomp);
dmu_buf_rele(db, FTAG);
return (dmu_object_free(os, obj, tx));
}
boolean_t
bptree_is_empty(objset_t *os, uint64_t obj)
{
dmu_buf_t *db;
bptree_phys_t *bt;
boolean_t rv;
VERIFY0(dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
rv = (bt->bt_begin == bt->bt_end);
dmu_buf_rele(db, FTAG);
return (rv);
}
void
bptree_add(objset_t *os, uint64_t obj, blkptr_t *bp, uint64_t birth_txg,
uint64_t bytes, uint64_t comp, uint64_t uncomp, dmu_tx_t *tx)
{
dmu_buf_t *db;
bptree_phys_t *bt;
bptree_entry_phys_t *bte;
/*
 * bptree objects are in the pool MOS; therefore they can only be
* modified in syncing context. Furthermore, this is only modified
* by the sync thread, so no locking is necessary.
*/
ASSERT(dmu_tx_is_syncing(tx));
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
bte = kmem_zalloc(sizeof (*bte), KM_SLEEP);
bte->be_birth_txg = birth_txg;
bte->be_bp = *bp;
dmu_write(os, obj, bt->bt_end * sizeof (*bte), sizeof (*bte), bte, tx);
kmem_free(bte, sizeof (*bte));
dmu_buf_will_dirty(db, tx);
bt->bt_end++;
bt->bt_bytes += bytes;
bt->bt_comp += comp;
bt->bt_uncomp += uncomp;
dmu_buf_rele(db, FTAG);
}
-/* ARGSUSED */
static int
bptree_visit_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog, (void) dnp;
int err;
struct bptree_args *ba = arg;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_REDACTED(bp))
return (0);
err = ba->ba_func(ba->ba_arg, bp, ba->ba_tx);
if (err == 0 && ba->ba_free) {
ba->ba_phys->bt_bytes -= bp_get_dsize_sync(spa, bp);
ba->ba_phys->bt_comp -= BP_GET_PSIZE(bp);
ba->ba_phys->bt_uncomp -= BP_GET_UCSIZE(bp);
}
return (err);
}
/*
* If "free" is set:
* - It is assumed that "func" will be freeing the block pointers.
* - If "func" returns nonzero, the bookmark will be remembered and
* iteration will be restarted from this point on next invocation.
* - If an i/o error is encountered (e.g. "func" returns EIO or ECKSUM),
* bptree_iterate will remember the bookmark, continue traversing
* any additional entries, and return 0.
*
* If "free" is not set, traversal will stop and return an error if
* an i/o error is encountered.
*
* In either case, if zfs_free_leak_on_eio is set, i/o errors will be
* ignored and traversal will continue (i.e. TRAVERSE_HARD will be passed to
* traverse_dataset_destroyed()).
*/
int
bptree_iterate(objset_t *os, uint64_t obj, boolean_t free, bptree_itor_t func,
void *arg, dmu_tx_t *tx)
{
boolean_t ioerr = B_FALSE;
int err;
uint64_t i;
dmu_buf_t *db;
struct bptree_args ba;
ASSERT(!free || dmu_tx_is_syncing(tx));
err = dmu_bonus_hold(os, obj, FTAG, &db);
if (err != 0)
return (err);
if (free)
dmu_buf_will_dirty(db, tx);
ba.ba_phys = db->db_data;
ba.ba_free = free;
ba.ba_func = func;
ba.ba_arg = arg;
ba.ba_tx = tx;
err = 0;
for (i = ba.ba_phys->bt_begin; i < ba.ba_phys->bt_end; i++) {
bptree_entry_phys_t bte;
int flags = TRAVERSE_PREFETCH_METADATA | TRAVERSE_POST |
TRAVERSE_NO_DECRYPT;
err = dmu_read(os, obj, i * sizeof (bte), sizeof (bte),
&bte, DMU_READ_NO_PREFETCH);
if (err != 0)
break;
if (zfs_free_leak_on_eio)
flags |= TRAVERSE_HARD;
zfs_dbgmsg("bptree index %lld: traversing from min_txg=%lld "
"bookmark %lld/%lld/%lld/%lld",
(longlong_t)i,
(longlong_t)bte.be_birth_txg,
(longlong_t)bte.be_zb.zb_objset,
(longlong_t)bte.be_zb.zb_object,
(longlong_t)bte.be_zb.zb_level,
(longlong_t)bte.be_zb.zb_blkid);
err = traverse_dataset_destroyed(os->os_spa, &bte.be_bp,
bte.be_birth_txg, &bte.be_zb, flags,
bptree_visit_cb, &ba);
if (free) {
/*
* The callback has freed the visited block pointers.
* Record our traversal progress on disk, either by
* updating this record's bookmark, or by logically
* removing this record by advancing bt_begin.
*/
if (err != 0) {
/* save bookmark for future resume */
ASSERT3U(bte.be_zb.zb_objset, ==,
ZB_DESTROYED_OBJSET);
ASSERT0(bte.be_zb.zb_level);
dmu_write(os, obj, i * sizeof (bte),
sizeof (bte), &bte, tx);
if (err == EIO || err == ECKSUM ||
err == ENXIO) {
/*
* Skip the rest of this tree and
* continue on to the next entry.
*/
err = 0;
ioerr = B_TRUE;
} else {
break;
}
} else if (ioerr) {
/*
* This entry is finished, but there were
* i/o errors on previous entries, so we
* can't adjust bt_begin. Set this entry's
* be_birth_txg such that it will be
* treated as a no-op in future traversals.
*/
bte.be_birth_txg = UINT64_MAX;
dmu_write(os, obj, i * sizeof (bte),
sizeof (bte), &bte, tx);
}
if (!ioerr) {
ba.ba_phys->bt_begin++;
(void) dmu_free_range(os, obj,
i * sizeof (bte), sizeof (bte), tx);
}
} else if (err != 0) {
break;
}
}
ASSERT(!free || err != 0 || ioerr ||
ba.ba_phys->bt_begin == ba.ba_phys->bt_end);
/* if all blocks are free there should be no used space */
if (ba.ba_phys->bt_begin == ba.ba_phys->bt_end) {
if (zfs_free_leak_on_eio) {
ba.ba_phys->bt_bytes = 0;
ba.ba_phys->bt_comp = 0;
ba.ba_phys->bt_uncomp = 0;
}
ASSERT0(ba.ba_phys->bt_bytes);
ASSERT0(ba.ba_phys->bt_comp);
ASSERT0(ba.ba_phys->bt_uncomp);
}
dmu_buf_rele(db, FTAG);
return (err);
}
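/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): driving bptree_iterate() in syncing context with "free" set,
 * per the contract documented above it. The callback and wrapper below
 * are hypothetical; the real consumer is the async-destroy machinery.
 */
#if 0
static int
example_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	/* Free (or account for) one block; nonzero pauses the traversal. */
	(void) arg, (void) bp, (void) tx;
	return (0);
}

static void
example_bptree_drain(objset_t *os, uint64_t bptree_obj, dmu_tx_t *tx)
{
	/*
	 * With free == B_TRUE: callback I/O errors are recorded and the
	 * remaining entries are still traversed (returning 0); any other
	 * nonzero callback return stops here with the bookmark saved, so
	 * the next call resumes where this one left off.
	 */
	int err = bptree_iterate(os, bptree_obj, B_TRUE, example_free_cb,
	    NULL, tx);
	(void) err;
}
#endif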
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index b21ea8d358ea..d61724be8a07 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -1,5056 +1,5054 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
kstat_t *dbuf_ksp;
typedef struct dbuf_stats {
/*
* Various statistics about the size of the dbuf cache.
*/
kstat_named_t cache_count;
kstat_named_t cache_size_bytes;
kstat_named_t cache_size_bytes_max;
/*
* Statistics regarding the bounds on the dbuf cache size.
*/
kstat_named_t cache_target_bytes;
kstat_named_t cache_lowater_bytes;
kstat_named_t cache_hiwater_bytes;
/*
* Total number of dbuf cache evictions that have occurred.
*/
kstat_named_t cache_total_evicts;
/*
* The distribution of dbuf levels in the dbuf cache and
* the total size of all dbufs at each level.
*/
kstat_named_t cache_levels[DN_MAX_LEVELS];
kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
/*
* Statistics about the dbuf hash table.
*/
kstat_named_t hash_hits;
kstat_named_t hash_misses;
kstat_named_t hash_collisions;
kstat_named_t hash_elements;
kstat_named_t hash_elements_max;
/*
* Number of sublists containing more than one dbuf in the dbuf
* hash table. Keep track of the longest hash chain.
*/
kstat_named_t hash_chains;
kstat_named_t hash_chain_max;
/*
* Number of times a dbuf_create() discovers that a dbuf was
* already created and in the dbuf hash table.
*/
kstat_named_t hash_insert_race;
/*
* Statistics about the size of the metadata dbuf cache.
*/
kstat_named_t metadata_cache_count;
kstat_named_t metadata_cache_size_bytes;
kstat_named_t metadata_cache_size_bytes_max;
/*
* For diagnostic purposes, this is incremented whenever we can't add
* something to the metadata cache because it's full, and instead put
* the data in the regular dbuf cache.
*/
kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;
dbuf_stats_t dbuf_stats = {
{ "cache_count", KSTAT_DATA_UINT64 },
{ "cache_size_bytes", KSTAT_DATA_UINT64 },
{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "cache_target_bytes", KSTAT_DATA_UINT64 },
{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
{ "cache_total_evicts", KSTAT_DATA_UINT64 },
{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
{ "hash_hits", KSTAT_DATA_UINT64 },
{ "hash_misses", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "hash_insert_race", KSTAT_DATA_UINT64 },
{ "metadata_cache_count", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
{ "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "metadata_cache_overflow", KSTAT_DATA_UINT64 }
};
struct {
wmsum_t cache_count;
wmsum_t cache_total_evicts;
wmsum_t cache_levels[DN_MAX_LEVELS];
wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
wmsum_t hash_hits;
wmsum_t hash_misses;
wmsum_t hash_collisions;
wmsum_t hash_chains;
wmsum_t hash_insert_race;
wmsum_t metadata_cache_count;
wmsum_t metadata_cache_overflow;
} dbuf_sums;
#define DBUF_STAT_INCR(stat, val) \
wmsum_add(&dbuf_sums.stat, val);
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val));
#define DBUF_STAT_BUMP(stat) \
DBUF_STAT_INCR(stat, 1);
#define DBUF_STAT_BUMPDOWN(stat) \
DBUF_STAT_INCR(stat, -1);
#define DBUF_STAT_MAX(stat, v) { \
uint64_t _m; \
while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
(_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
continue; \
}
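/*
 * Standalone illustrative sketch (not part of this patch): DBUF_STAT_MAX
 * above maintains a running maximum without a lock by retrying a
 * compare-and-swap until either the stored value is already at least as
 * large as the candidate or the swap succeeds.  A minimal C11 equivalent
 * is sketched below; stat_max() is a hypothetical name and <stdatomic.h>
 * stands in for atomic_cas_64().
 */
#include <stdatomic.h>
#include <stdint.h>

static void
stat_max(_Atomic uint64_t *stat, uint64_t v)
{
	uint64_t m = atomic_load(stat);

	/* On failure the CAS reloads m, so the bound is re-checked. */
	while (v > m && !atomic_compare_exchange_weak(stat, &m, v))
		continue;
}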
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
-extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
- dmu_buf_evict_func_t *evict_func_sync,
- dmu_buf_evict_func_t *evict_func_async,
- dmu_buf_t **clear_on_evict_dbufp);
-
/*
* Global data structures and functions for the dbuf cache.
*/
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;
static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;
/*
* There are two dbuf caches; each dbuf can only be in one of them at a time.
*
* 1. Cache of metadata dbufs, to help make read-heavy administrative commands
* from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
* that represent the metadata that describes filesystems/snapshots/
* bookmarks/properties/etc. We only evict from this cache when we export a
* pool, to short-circuit as much I/O as possible for all administrative
* commands that need the metadata. There is no eviction policy for this
* cache, because we try to only include types in it which would occupy a
* very small amount of space per object but create a large impact on the
* performance of these commands. Instead, after it reaches a maximum size
* (which should only happen on very small memory systems with a very large
* number of filesystem objects), we stop taking new dbufs into the
* metadata cache, instead putting them in the normal dbuf cache.
*
* 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
* are not currently held but have been recently released. These dbufs
* are not eligible for arc eviction until they are aged out of the cache.
* Dbufs that are aged out of the cache will be immediately destroyed and
* become eligible for arc eviction.
*
* Dbufs are added to these caches once the last hold is released. If a dbuf is
* later accessed and still exists in the dbuf cache, then it will be removed
* from the cache and later re-added to the head of the cache.
*
* If a given dbuf meets the requirements for the metadata cache, it will go
* there, otherwise it will be considered for the generic LRU dbuf cache. The
* caches and the refcounts tracking their sizes are stored in an array indexed
* by those caches' matching enum values (from dbuf_cached_state_t).
*/
typedef struct dbuf_cache {
multilist_t cache;
zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
/* Size limits for the caches */
unsigned long dbuf_cache_max_bytes = ULONG_MAX;
unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;
static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);
/*
* The LRU dbuf cache uses a three-stage eviction policy:
* - A low water marker designates when the dbuf eviction thread
* should stop evicting from the dbuf cache.
* - When we reach the maximum size (aka mid water mark), we
* signal the eviction thread to run.
* - The high water mark indicates when the eviction thread
* is unable to keep up with the incoming load and eviction must
* happen in the context of the calling thread.
*
* The dbuf cache:
* (max size)
* low water mid water hi water
* +----------------------------------------+----------+----------+
* | | | |
* | | | |
* | | | |
* | | | |
* +----------------------------------------+----------+----------+
* stop signal evict
* evicting eviction directly
* thread
*
* The high and low water marks indicate the operating range for the eviction
* thread. The low water mark is, by default, 90% of the total size of the
* cache and the high water mark is at 110% (both of these percentages can be
* changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
* respectively). The eviction thread will try to ensure that the cache remains
* within this range by waking up every second and checking if the cache is
* above the low water mark. The thread can also be woken up by callers adding
* elements into the cache if the cache is larger than the mid water (i.e. max
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
* water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
/*
* The percentage above and below the maximum cache size.
*/
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
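/*
 * Standalone illustrative sketch (not part of this patch): the three
 * thresholds described above, restated as a pure function.  A cache at
 * or below the target (mid water) needs no action, a cache between the
 * target and the high water mark only signals the eviction thread, and
 * a cache above the high water mark makes callers evict directly.  The
 * names cache_action and cache_action_for_size are hypothetical.
 */
#include <stdint.h>

enum cache_action { CA_NONE, CA_SIGNAL_EVICT_THREAD, CA_EVICT_DIRECTLY };

static enum cache_action
cache_action_for_size(uint64_t size, uint64_t target, unsigned hiwater_pct)
{
	uint64_t hiwater = target + (target * hiwater_pct) / 100;

	if (size <= target)
		return (CA_NONE);
	if (size <= hiwater)
		return (CA_SIGNAL_EVICT_THREAD);
	return (CA_EVICT_DIRECTLY);
}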
-/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
dmu_buf_impl_t *db = vdb;
bzero(db, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
zfs_refcount_create(&db->db_holds);
return (0);
}
-/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
+ (void) unused;
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
rw_destroy(&db->db_rwlock);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
zfs_refcount_destroy(&db->db_holds);
}
/*
* dbuf hash table routines
*/
static dbuf_hash_table_t dbuf_hash_table;
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
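/*
 * Standalone illustrative sketch (not part of this patch): reducing a
 * 64-bit hash such as the cityhash4() value above to a bucket index.
 * Because the table size is kept a power of two, masking with
 * (size - 1) is equivalent to a modulo but needs only a single AND,
 * which is what "hv & h->hash_table_mask" does in dbuf_find() and
 * dbuf_hash_insert() below.  bucket_index() is a hypothetical name.
 */
#include <stdint.h>
#include <stddef.h>

static size_t
bucket_index(uint64_t hash, size_t table_size_pow2)
{
	/* e.g. bucket_index(h, 1 << 16) == h % 65536 */
	return ((size_t)(hash & (table_size_pow2 - 1)));
}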
#define DTRACE_SET_STATE(db, why) \
DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
const char *, why)
#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
((dbuf)->db.db_object == (obj) && \
(dbuf)->db_objset == (os) && \
(dbuf)->db_level == (level) && \
(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv;
uint64_t idx;
dmu_buf_impl_t *db;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (db);
}
mutex_exit(&db->db_mtx);
}
}
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (NULL);
}
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
dnode_t *dn;
dmu_buf_impl_t *db = NULL;
if (dnode_hold(os, object, FTAG, &dn) == 0) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus != NULL) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
return (db);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
*/
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
uint64_t blkid, hv, idx;
dmu_buf_impl_t *dbf;
uint32_t i;
blkid = db->db_blkid;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
dbf = dbf->db_hash_next, i++) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
mutex_enter(&dbf->db_mtx);
if (dbf->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (dbf);
}
mutex_exit(&dbf->db_mtx);
}
}
if (i > 0) {
DBUF_STAT_BUMP(hash_collisions);
if (i == 1)
DBUF_STAT_BUMP(hash_chains);
DBUF_STAT_MAX(hash_chain_max, i);
}
mutex_enter(&db->db_mtx);
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
DBUF_STAT_MAX(hash_elements_max, he);
return (NULL);
}
/*
* This returns whether this dbuf should be stored in the metadata cache, which
* is based on whether it's from one of the dnode types that store data related
* to traversing dataset hierarchies.
*/
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
DB_DNODE_ENTER(db);
dmu_object_type_t type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
/* Check if this dbuf is one of the types we care about */
if (DMU_OT_IS_METADATA_CACHED(type)) {
/* If we hit this, then we set something up wrong in dmu_ot */
ASSERT(DMU_OT_IS_METADATA(type));
/*
* Sanity check for small-memory systems: don't allocate too
* much memory for this purpose.
*/
if (zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
dbuf_metadata_cache_target_bytes()) {
DBUF_STAT_BUMP(metadata_cache_overflow);
return (B_FALSE);
}
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Remove an entry from the hash table. It must be in the EVICTING state.
*/
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv, idx;
dmu_buf_impl_t *dbf, **dbp;
hv = dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid);
idx = hv & h->hash_table_mask;
/*
* We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
*/
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_state == DB_EVICTING);
ASSERT(!MUTEX_HELD(&db->db_mtx));
mutex_enter(DBUF_HASH_MUTEX(h, idx));
dbp = &h->hash_table[idx];
while ((dbf = *dbp) != db) {
dbp = &dbf->db_hash_next;
ASSERT(dbf != NULL);
}
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
if (h->hash_table[idx] &&
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
mutex_exit(DBUF_HASH_MUTEX(h, idx));
atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}
typedef enum {
DBVU_EVICTING,
DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
int64_t holds;
if (db->db_user == NULL)
return;
/* Only data blocks support the attachment of user data. */
ASSERT(db->db_level == 0);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
ASSERT3U(db->db_state, ==, DB_CACHED);
holds = zfs_refcount_count(&db->db_holds);
if (verify_type == DBVU_EVICTING) {
/*
* Immediate eviction occurs when holds == dirtycnt.
* For normal eviction buffers, holds is zero on
* eviction, except when dbuf_fix_old_data() calls
* dbuf_clear_data(). However, the hold count can grow
* during eviction even though db_mtx is held (see
* dmu_bonus_hold() for an example), so we can only
* test the generic invariant that holds >= dirtycnt.
*/
ASSERT3U(holds, >=, db->db_dirtycnt);
} else {
if (db->db_user_immediate_evict == TRUE)
ASSERT3U(holds, >=, db->db_dirtycnt);
else
ASSERT3U(holds, >, 0);
}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (dbu == NULL)
return;
dbuf_verify_user(db, DBVU_EVICTING);
db->db_user = NULL;
#ifdef ZFS_DEBUG
if (dbu->dbu_clear_on_evict_dbufp != NULL)
*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif
/*
* There are two eviction callbacks - one that we call synchronously
* and one that we invoke via a taskq. The async one is useful for
* avoiding lock order reversals and limiting stack depth.
*
* Note that if we have a sync callback but no async callback,
* it's likely that the sync callback will free the structure
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
if (has_async) {
taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
dbu, 0, &dbu->dbu_tqent);
}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
/*
* Consider indirect blocks and spill blocks to be meta data.
*/
if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
return (B_TRUE);
} else {
boolean_t is_metadata;
DB_DNODE_ENTER(db);
is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
DB_DNODE_EXIT(db);
return (is_metadata);
}
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the dbuf eviction
* code is laid out; dbuf_evict_thread() assumes dbufs are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;
/*
* The assumption here is that the hash value for a given
* dmu_buf_impl_t will remain constant throughout its lifetime
* (i.e. its objset, object, level and blkid fields don't change).
* Thus, we don't need to store the dbuf's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid) %
multilist_get_num_sublists(ml));
}
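/*
 * Standalone illustrative sketch (not part of this patch): the sublist
 * selection rule described above.  The hash is truncated to 32 bits
 * before the modulo, since the low-order bits are already evenly
 * distributed and a full 64-bit division buys nothing here.
 * sublist_index() is a hypothetical name.
 */
#include <stdint.h>

static unsigned int
sublist_index(uint64_t hash, unsigned int num_sublists)
{
	/* e.g. with 64 sublists, hashes spread evenly over indices 0..63 */
	return ((unsigned int)hash % num_sublists);
}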
/*
* The target size of the dbuf cache can grow with the ARC target,
* unless limited by the tunable dbuf_cache_max_bytes.
*/
static inline unsigned long
dbuf_cache_target_bytes(void)
{
return (MIN(dbuf_cache_max_bytes,
arc_target_bytes() >> dbuf_cache_shift));
}
/*
* The target size of the dbuf metadata cache can grow with the ARC target,
* unless limited by the tunable dbuf_metadata_cache_max_bytes.
*/
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
return (MIN(dbuf_metadata_cache_max_bytes,
arc_target_bytes() >> dbuf_metadata_cache_shift));
}
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target +
(dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}
static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target -
(dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
static inline boolean_t
dbuf_cache_above_lowater(void)
{
return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
dbuf_cache_lowater_bytes());
}
/*
* Evict the oldest eligible dbuf from the dbuf cache.
*/
static void
dbuf_evict_one(void)
{
int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
multilist_sublist_t *mls = multilist_sublist_lock(
&dbuf_caches[DB_DBUF_CACHE].cache, idx);
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
dmu_buf_impl_t *db = multilist_sublist_tail(mls);
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
}
DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
multilist_sublist_t *, mls);
if (db != NULL) {
multilist_sublist_remove(mls, db);
multilist_sublist_unlock(mls);
(void) zfs_refcount_remove_many(
&dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
db->db_caching_status = DB_NO_CACHE;
dbuf_destroy(db);
DBUF_STAT_BUMP(cache_total_evicts);
} else {
multilist_sublist_unlock(mls);
}
}
/*
* The dbuf evict thread is responsible for aging out dbufs from the
* cache. Once the cache has reached its maximum size, dbufs are removed
* and destroyed. The eviction thread will continue running until the size
* of the dbuf cache is at or below the maximum size. Once the dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
-/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
+ (void) unused;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
mutex_enter(&dbuf_evict_lock);
while (!dbuf_evict_thread_exit) {
while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
&dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
/*
* Keep evicting as long as we're above the low water mark
* for the cache. We do this without holding the locks to
* minimize lock contention.
*/
while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
dbuf_evict_one();
}
mutex_enter(&dbuf_evict_lock);
}
dbuf_evict_thread_exit = B_FALSE;
cv_broadcast(&dbuf_evict_cv);
CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
thread_exit();
}
/*
* Wake up the dbuf eviction thread if the dbuf cache is at its max size.
* If the dbuf cache is at its high water mark, then evict a dbuf from the
* dbuf cache using the caller's context.
*/
static void
dbuf_evict_notify(uint64_t size)
{
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
* and grabbing the lock results in massive lock contention.
*/
if (size > dbuf_cache_target_bytes()) {
if (size > dbuf_cache_hiwater_bytes())
dbuf_evict_one();
cv_signal(&dbuf_evict_cv);
}
}
static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
ds->cache_count.value.ui64 =
wmsum_value(&dbuf_sums.cache_count);
ds->cache_size_bytes.value.ui64 =
zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
ds->cache_total_evicts.value.ui64 =
wmsum_value(&dbuf_sums.cache_total_evicts);
for (int i = 0; i < DN_MAX_LEVELS; i++) {
ds->cache_levels[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels[i]);
ds->cache_levels_bytes[i].value.ui64 =
wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
}
ds->hash_hits.value.ui64 =
wmsum_value(&dbuf_sums.hash_hits);
ds->hash_misses.value.ui64 =
wmsum_value(&dbuf_sums.hash_misses);
ds->hash_collisions.value.ui64 =
wmsum_value(&dbuf_sums.hash_collisions);
ds->hash_chains.value.ui64 =
wmsum_value(&dbuf_sums.hash_chains);
ds->hash_insert_race.value.ui64 =
wmsum_value(&dbuf_sums.hash_insert_race);
ds->metadata_cache_count.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_count);
ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
ds->metadata_cache_overflow.value.ui64 =
wmsum_value(&dbuf_sums.metadata_cache_overflow);
return (0);
}
void
dbuf_init(void)
{
uint64_t hsize = 1ULL << 16;
dbuf_hash_table_t *h = &dbuf_hash_table;
int i;
/*
* The hash table is big enough to fill one eighth of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
hsize <<= 1;
retry:
h->hash_table_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
if (h->hash_table == NULL) {
/* XXX - we should really return an error instead of assert */
ASSERT(hsize > (1ULL << 10));
hsize >>= 1;
goto retry;
}
dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
sizeof (dmu_buf_impl_t),
0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < DBUF_MUTEXES; i++)
mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
dbuf_stats_init(h);
/*
* All entries are queued via taskq_dispatch_ent(), so min/maxalloc
* configuration is not required.
*/
dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
multilist_create(&dbuf_caches[dcs].cache,
sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_cache_link),
dbuf_cache_multilist_index_func);
zfs_refcount_create(&dbuf_caches[dcs].size);
}
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
wmsum_init(&dbuf_sums.cache_count, 0);
wmsum_init(&dbuf_sums.cache_total_evicts, 0);
for (i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_init(&dbuf_sums.cache_levels[i], 0);
wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
}
wmsum_init(&dbuf_sums.hash_hits, 0);
wmsum_init(&dbuf_sums.hash_misses, 0);
wmsum_init(&dbuf_sums.hash_collisions, 0);
wmsum_init(&dbuf_sums.hash_chains, 0);
wmsum_init(&dbuf_sums.hash_insert_race, 0);
wmsum_init(&dbuf_sums.metadata_cache_count, 0);
wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dbuf_ksp != NULL) {
for (i = 0; i < DN_MAX_LEVELS; i++) {
snprintf(dbuf_stats.cache_levels[i].name,
KSTAT_STRLEN, "cache_level_%d", i);
dbuf_stats.cache_levels[i].data_type =
KSTAT_DATA_UINT64;
snprintf(dbuf_stats.cache_levels_bytes[i].name,
KSTAT_STRLEN, "cache_level_%d_bytes", i);
dbuf_stats.cache_levels_bytes[i].data_type =
KSTAT_DATA_UINT64;
}
dbuf_ksp->ks_data = &dbuf_stats;
dbuf_ksp->ks_update = dbuf_kstat_update;
kstat_install(dbuf_ksp);
}
}
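/*
 * Standalone illustrative sketch (not part of this patch): the hash
 * table sizing rule used at the top of dbuf_init() above, restated as a
 * pure function.  Starting from 64K buckets, the count doubles until
 * buckets * average_blocksize covers one eighth of memory; for 16 GiB
 * of RAM and the default 8K average block size this stops at 2^18
 * buckets, i.e. 2 MiB of bucket pointers on a 64-bit system.
 * hash_table_buckets() is a hypothetical name.
 */
#include <stdint.h>

static uint64_t
hash_table_buckets(uint64_t total_memory, uint64_t average_blocksize)
{
	uint64_t hsize = 1ULL << 16;

	while (hsize * average_blocksize < total_memory / 8)
		hsize <<= 1;
	return (hsize);
}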
void
dbuf_fini(void)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
int i;
dbuf_stats_destroy();
for (i = 0; i < DBUF_MUTEXES; i++)
mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
kmem_cache_destroy(dbuf_kmem_cache);
taskq_destroy(dbu_evict_taskq);
mutex_enter(&dbuf_evict_lock);
dbuf_evict_thread_exit = B_TRUE;
while (dbuf_evict_thread_exit) {
cv_signal(&dbuf_evict_cv);
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
zfs_refcount_destroy(&dbuf_caches[dcs].size);
multilist_destroy(&dbuf_caches[dcs].cache);
}
if (dbuf_ksp != NULL) {
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
wmsum_fini(&dbuf_sums.cache_count);
wmsum_fini(&dbuf_sums.cache_total_evicts);
for (i = 0; i < DN_MAX_LEVELS; i++) {
wmsum_fini(&dbuf_sums.cache_levels[i]);
wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
}
wmsum_fini(&dbuf_sums.hash_hits);
wmsum_fini(&dbuf_sums.hash_misses);
wmsum_fini(&dbuf_sums.hash_collisions);
wmsum_fini(&dbuf_sums.hash_chains);
wmsum_fini(&dbuf_sums.hash_insert_race);
wmsum_fini(&dbuf_sums.metadata_cache_count);
wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}
/*
* Other stuff.
*/
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
dnode_t *dn;
dbuf_dirty_record_t *dr;
uint32_t txg_prev;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
return;
ASSERT(db->db_objset != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
ASSERT(db->db_parent == NULL);
ASSERT(db->db_blkptr == NULL);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
ASSERT3U(db->db_level, <, dn->dn_nlevels);
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID ||
!avl_is_empty(&dn->dn_dbufs));
}
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dn != NULL);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
} else if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn != NULL);
ASSERT0(db->db.db_offset);
} else {
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
}
if ((dr = list_head(&db->db_dirty_records)) != NULL) {
ASSERT(dr->dr_dbuf == db);
txg_prev = dr->dr_txg;
for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
dr = list_next(&db->db_dirty_records, dr)) {
ASSERT(dr->dr_dbuf == db);
ASSERT(txg_prev > dr->dr_txg);
txg_prev = dr->dr_txg;
}
}
/*
* We can't assert that db_size matches dn_datablksz because it
* can be momentarily different when another thread is doing
* dnode_set_blksz().
*/
if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
dr = db->db_data_pending;
/*
* It should only be modified in syncing context, so
* make sure we only have one copy of the data.
*/
ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
}
/* verify db->db_blkptr */
if (db->db_blkptr) {
if (db->db_parent == dn->dn_dbuf) {
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
ASSERT(db->db_parent == NULL);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
} else {
/* db is pointed to by an indirect block */
int epb __maybe_unused = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
/*
* dnode_grow_indblksz() can make this fail if we don't
* have the parent's rwlock. XXX indblksz no longer
* grows. safe to do this now?
*/
if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
ASSERT3P(db->db_blkptr, ==,
((blkptr_t *)db->db_parent->db.db_data +
db->db_blkid % epb));
}
}
}
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
(db->db_buf == NULL || db->db_buf->b_data) &&
db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
db->db_state != DB_FILL && !dn->dn_free_txg) {
/*
* If the blkptr isn't set but they have nonzero data,
* it had better be dirty, otherwise we'll lose that
* data when we evict this buffer.
*
* There is an exception to this rule for indirect blocks; in
* this case, if the indirect block is a hole, we fill in a few
* fields on each of the child blocks (importantly, birth time)
* to prevent hole birth times from being lost when you
* partially fill in a hole.
*/
if (db->db_dirtycnt == 0) {
if (db->db_level == 0) {
uint64_t *buf = db->db.db_data;
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
ASSERT(buf[i] == 0);
}
} else {
blkptr_t *bps = db->db.db_data;
ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
db->db.db_size);
/*
* We want to verify that all the blkptrs in the
* indirect block are holes, but we may have
* automatically set up a few fields for them.
* We iterate through each blkptr and verify
* they only have those fields set.
*/
for (int i = 0;
i < db->db.db_size / sizeof (blkptr_t);
i++) {
blkptr_t *bp = &bps[i];
ASSERT(ZIO_CHECKSUM_IS_ZERO(
&bp->blk_cksum));
ASSERT(
DVA_IS_EMPTY(&bp->blk_dva[0]) &&
DVA_IS_EMPTY(&bp->blk_dva[1]) &&
DVA_IS_EMPTY(&bp->blk_dva[2]));
ASSERT0(bp->blk_fill);
ASSERT0(bp->blk_pad[0]);
ASSERT0(bp->blk_pad[1]);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(BP_IS_HOLE(bp));
ASSERT0(bp->blk_phys_birth);
}
}
}
}
DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
ASSERT3P(db->db_buf, ==, NULL);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL) {
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "clear data");
}
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(buf != NULL);
db->db_buf = buf;
ASSERT(buf->b_data != NULL);
db->db.db_data = buf->b_data;
}
static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
spa_t *spa = db->db_objset->os_spa;
return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
/*
* Loan out an arc_buf for read. Return the loaned arc_buf.
*/
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
arc_buf_t *abuf;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
int blksz = db->db.db_size;
spa_t *spa = db->db_objset->os_spa;
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
bcopy(db->db.db_data, abuf->b_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
mutex_exit(&db->db_mtx);
}
return (abuf);
}
/*
* Calculate which level n block references the data at the level 0 offset
* provided.
*/
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
/*
* The level n blkid is equal to the level 0 blkid divided by
* the number of level 0s in a level n block.
*
* The level 0 blkid is offset >> datablkshift =
* offset / 2^datablkshift.
*
* The number of level 0s in a level n is the number of block
* pointers in an indirect block, raised to the power of level.
* This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
* 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
*
* Thus, the level n blkid is: offset /
* ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
* = offset / 2^(datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
* = offset >> (datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
*/
const unsigned exp = dn->dn_datablkshift +
level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
if (exp >= 8 * sizeof (offset)) {
/* This only happens on the highest indirection level */
ASSERT3U(level, ==, dn->dn_nlevels - 1);
return (0);
}
ASSERT3U(exp, <, 8 * sizeof (offset));
return (offset >> exp);
} else {
ASSERT3U(offset, <, dn->dn_datablksz);
return (0);
}
}
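/*
 * Standalone illustrative sketch (not part of this patch): the shift
 * derived in the comment above, evaluated for a common layout.  With
 * 128K data blocks (datablkshift = 17) and 128K indirect blocks
 * (indblkshift = 17), an indirect block holds 2^(17 - 7) = 1024 block
 * pointers, so the level-1 block id is offset >> (17 + 10).
 * which_block() and EX_SPA_BLKPTRSHIFT are hypothetical names that
 * restate the same arithmetic outside the kernel.
 */
#include <stdint.h>

#define	EX_SPA_BLKPTRSHIFT	7	/* a blkptr_t is 128 bytes */

static uint64_t
which_block(unsigned datablkshift, unsigned indblkshift, unsigned level,
    uint64_t offset)
{
	unsigned exp = datablkshift +
	    level * (indblkshift - EX_SPA_BLKPTRSHIFT);

	/* which_block(17, 17, 1, 512ULL << 20) == 4 */
	return (exp >= 64 ? 0 : offset >> exp);
}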
/*
* This function is used to lock the parent of the provided dbuf. This should be
* used when modifying or reading db_blkptr.
*/
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
{
enum db_lock_type ret = DLT_NONE;
if (db->db_parent != NULL) {
rw_enter(&db->db_parent->db_rwlock, rw);
ret = DLT_PARENT;
} else if (dmu_objset_ds(db->db_objset) != NULL) {
rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
tag);
ret = DLT_OBJSET;
}
/*
* We only return a DLT_NONE lock when it's the top-most indirect block
* of the meta-dnode of the MOS.
*/
return (ret);
}
/*
* We need to pass the lock type in because it's possible that the block will
* move from being the topmost indirect block in a dnode (and thus, have no
* parent) to not the top-most via an indirection increase. This would cause a
* panic if we didn't pass the lock type in.
*/
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
{
if (type == DLT_PARENT)
rw_exit(&db->db_parent->db_rwlock);
else if (type == DLT_OBJSET)
rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
+ (void) zb, (void) bp;
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
ASSERT3U(db->db_state, ==, DB_READ);
/*
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(zfs_refcount_count(&db->db_holds) > 0);
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
if (buf == NULL) {
/* i/o error */
ASSERT(zio == NULL || zio->io_error != 0);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT3P(db->db_buf, ==, NULL);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "i/o error");
} else if (db->db_level == 0 && db->db_freed_in_flight) {
/* freed in flight */
ASSERT(zio == NULL || zio->io_error == 0);
arc_release(buf, db);
bzero(buf->b_data, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "freed in flight");
} else {
/* success */
ASSERT(zio == NULL || zio->io_error == 0);
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "successful read");
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL, B_FALSE);
}
/*
* Shortcut for performing reads on bonus dbufs. Returns
* an error if we fail to verify the dnode associated with
* a decrypted block. Otherwise success.
*/
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
int bonuslen, max_bonuslen, err;
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err)
return (err);
bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(DB_DNODE_HELD(db));
ASSERT3U(bonuslen, <=, db->db.db_size);
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
bzero(db->db.db_data, max_bonuslen);
if (bonuslen)
bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "bonus buffer filled");
return (0);
}
static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
blkptr_t *bps = db->db.db_data;
uint32_t indbs = 1ULL << dn->dn_indblkshift;
int n_bps = indbs >> SPA_BLKPTRSHIFT;
for (int i = 0; i < n_bps; i++) {
blkptr_t *bp = &bps[i];
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
}
}
/*
* Handle reads on dbufs that are holes, if necessary. This function
* requires that the dbuf's mutex is held. Returns success (0) if action
* was taken, ENOENT if no action was taken.
*/
static int
-dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
+dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
/*
* For level 0 blocks only, if the above check fails:
* Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
* processes the delete record and clears the bp while we are waiting
* for the dn_mtx (resulting in a "no" from block_freed).
*/
if (!is_hole && db->db_level == 0) {
is_hole = dnode_block_freed(dn, db->db_blkid) ||
BP_IS_HOLE(db->db_blkptr);
}
if (is_hole) {
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
bzero(db->db.db_data, db->db.db_size);
if (db->db_blkptr != NULL && db->db_level > 0 &&
BP_IS_HOLE(db->db_blkptr) &&
db->db_blkptr->blk_birth != 0) {
dbuf_handle_indirect_hole(db, dn);
}
db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "hole read satisfied");
return (0);
}
return (ENOENT);
}
/*
* This function ensures that, when doing a decrypting read of a block,
* we make sure we have decrypted the dnode associated with it. We must do
* this so that we ensure we are fully authenticating the checksum-of-MACs
* tree from the root of the objset down to this block. Indirect blocks are
* always verified against their secure checksum-of-MACs assuming that the
* dnode containing them is correct. Now that we are doing a decrypting read,
* we can be sure that the key is loaded and verify that assumption. This is
* especially important considering that we always read encrypted dnode
* blocks as raw data (without verifying their MACs) to start, and
* decrypt / authenticate them when we need to read an encrypted bonus buffer.
*/
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
int err = 0;
objset_t *os = db->db_objset;
arc_buf_t *dnode_abuf;
dnode_t *dn;
zbookmark_phys_t zb;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!os->os_encrypted || os->os_raw_receive ||
(flags & DB_RF_NO_DECRYPT) != 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
DB_DNODE_EXIT(db);
return (0);
}
SET_BOOKMARK(&zb, dmu_objset_id(os),
DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
/*
* An error code of EACCES tells us that the key is still not
* available. This is ok if we are only reading authenticated
* (and therefore non-encrypted) blocks.
*/
if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
(db->db_blkid == DMU_BONUS_BLKID &&
!DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
err = 0;
DB_DNODE_EXIT(db);
return (err);
}
/*
* Drops db_mtx and the parent lock specified by dblt and tag before
* returning.
*/
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
db_lock_type_t dblt, void *tag)
{
dnode_t *dn;
zbookmark_phys_t zb;
uint32_t aflags = ARC_FLAG_NOWAIT;
int err, zio_flags;
boolean_t bonus_read;
err = zio_flags = 0;
bonus_read = B_FALSE;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED);
ASSERT(db->db_buf == NULL);
ASSERT(db->db_parent == NULL ||
RW_LOCK_HELD(&db->db_parent->db_rwlock));
if (db->db_blkid == DMU_BONUS_BLKID) {
err = dbuf_read_bonus(db, dn, flags);
goto early_unlock;
}
- err = dbuf_read_hole(db, dn, flags);
+ err = dbuf_read_hole(db, dn);
if (err == 0)
goto early_unlock;
/*
* Any attempt to read a redacted block should result in an error. This
* will never happen under normal conditions, but can be useful for
* debugging purposes.
*/
if (BP_IS_REDACTED(db->db_blkptr)) {
ASSERT(dsl_dataset_feature_is_active(
db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
err = SET_ERROR(EIO);
goto early_unlock;
}
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
spa_log_error(db->db_objset->os_spa, &zb);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(db->db_objset));
err = SET_ERROR(EIO);
goto early_unlock;
}
err = dbuf_read_verify_dnode_crypt(db, flags);
if (err != 0)
goto early_unlock;
DB_DNODE_EXIT(db);
db->db_state = DB_READ;
DTRACE_SET_STATE(db, "read issued");
mutex_exit(&db->db_mtx);
if (DBUF_IS_L2CACHEABLE(db))
aflags |= ARC_FLAG_L2CACHE;
dbuf_add_ref(db, NULL);
zio_flags = (flags & DB_RF_CANFAIL) ?
ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
zio_flags |= ZIO_FLAG_RAW;
/*
* The zio layer will copy the provided blkptr later, but we need to
* do this now so that we can release the parent's rwlock. We have to
* do that now so that if dbuf_read_done is called synchronously (on
* an l1 cache hit) we don't acquire the db_mtx while holding the
* parent's rwlock, which would be a lock ordering violation.
*/
blkptr_t bp = *db->db_blkptr;
dmu_buf_unlock_parent(db, dblt, tag);
(void) arc_read(zio, db->db_objset->os_spa, &bp,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
&aflags, &zb);
return (err);
early_unlock:
DB_DNODE_EXIT(db);
mutex_exit(&db->db_mtx);
dmu_buf_unlock_parent(db, dblt, tag);
return (err);
}
/*
* This is our just-in-time copy function. It makes a copy of buffers that
* have been modified in a previous transaction group before we access them in
* the current active group.
*
* This function is used in three places: when we are dirtying a buffer for the
* first time in a txg, when we are freeing a range in a dnode that includes
* this buffer, and when we are accessing a buffer which was received compressed
* and later referenced in a WRITE_BYREF record.
*
* Note that when we are called from dbuf_free_range() we do not put a hold on
* the buffer, we just traverse the active dbuf list for the dnode.
*/
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
ASSERT(db->db_level == 0);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
(dr->dt.dl.dr_data !=
((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
return;
/*
* If the last dirty record for this dbuf has not yet synced
* and it is referencing the dbuf data, either:
* reset the reference to point to a new copy,
* or (if there are no active holders)
* just null out the current db_data pointer.
*/
ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
enum zio_compress compress_type =
arc_get_compression(db->db_buf);
uint8_t complevel = arc_get_complevel(db->db_buf);
if (arc_is_encrypted(db->db_buf)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(db->db_buf, &byteorder, salt,
iv, mac);
dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
compress_type, complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
size, arc_buf_lsize(db->db_buf), compress_type,
complevel);
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
}
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
int err = 0;
boolean_t prefetch;
dnode_t *dn;
/*
* We don't have to hold the mutex to check db_state because it
* can't be freed while we have a hold on the buffer.
*/
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
if (db->db_state == DB_NOFILL)
return (SET_ERROR(EIO));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
(flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
DBUF_IS_CACHEABLE(db);
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED) {
spa_t *spa = dn->dn_objset->os_spa;
/*
* Ensure that this block's dnode has been decrypted if
* the caller has requested decrypted data.
*/
err = dbuf_read_verify_dnode_crypt(db, flags);
/*
* If the arc buf is compressed or encrypted and the caller
* requested uncompressed data, we need to untransform it
* before returning. We also call arc_untransform() on any
* unauthenticated blocks, which will verify their MAC if
* the key is now available.
*/
if (err == 0 && db->db_buf != NULL &&
(flags & DB_RF_NO_DECRYPT) == 0 &&
(arc_is_encrypted(db->db_buf) ||
arc_is_unauthenticated(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
mutex_exit(&db->db_mtx);
if (err == 0 && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_FALSE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_hits);
} else if (db->db_state == DB_UNCACHED) {
spa_t *spa = dn->dn_objset->os_spa;
boolean_t need_wait = B_FALSE;
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
if (zio == NULL &&
db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
need_wait = B_TRUE;
}
err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
/*
* dbuf_read_impl has dropped db_mtx and our parent's rwlock
* for us
*/
if (!err && prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
db->db_state != DB_CACHED,
flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/*
* If we created a zio_root we must execute it to avoid
* leaking it, even if it isn't attached to any work due
* to an error in dbuf_read_impl().
*/
if (need_wait) {
if (err == 0)
err = zio_wait(zio);
else
VERIFY0(zio_wait(zio));
}
} else {
/*
* Another reader came in while the dbuf was in flight
* between UNCACHED and CACHED. Either a writer will finish
* writing the buffer (sending the dbuf to CACHED) or the
* first reader's request will reach the read_done callback
* and send the dbuf to CACHED. Otherwise, a failure
* occurred and the dbuf went to UNCACHED.
*/
mutex_exit(&db->db_mtx);
if (prefetch) {
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
B_TRUE, flags & DB_RF_HAVESTRUCT);
}
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/* Skip the wait per the caller's request. */
if ((flags & DB_RF_NEVERWAIT) == 0) {
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL) {
ASSERT(db->db_state == DB_READ ||
(flags & DB_RF_HAVESTRUCT) == 0);
DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
db, zio_t *, zio);
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
}
}
return (err);
}
static void
dbuf_noread(dmu_buf_impl_t *db)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
dbuf_set_data(db, dbuf_alloc_arcbuf(db));
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "assigning filled buffer");
} else if (db->db_state == DB_NOFILL) {
dbuf_clear_data(db);
} else {
ASSERT3U(db->db_state, ==, DB_CACHED);
}
mutex_exit(&db->db_mtx);
}
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
uint64_t txg = dr->dr_txg;
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* This assert is valid because dmu_sync() expects to be called by
* a zilog's get_data while holding a range lock. This call only
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
ASSERT(db->db_level == 0);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
return;
ASSERT(db->db_data_pending != dr);
/* free this block */
if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
zio_free(db->db_objset->os_spa, txg, bp);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
dr->dt.dl.dr_nopwrite = B_FALSE;
dr->dt.dl.dr_has_raw_params = B_FALSE;
/*
* Release the already-written buffer, so we leave it in
* a consistent dirty state. Note that all callers are
* modifying the buffer, so they will immediately do
* another (redundant) arc_release(). Therefore, leave
* the buf thawed to save the effort of freezing &
* immediately re-thawing it.
*/
arc_release(dr->dt.dl.dr_data, db);
}
/*
* Evict (if it's unreferenced) or clear (if it's referenced) any level-0
* data blocks in the free range, so that any future readers will find
* empty blocks.
*/
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db, *db_next;
uint64_t txg = tx->tx_txg;
avl_index_t where;
dbuf_dirty_record_t *dr;
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
(u_longlong_t)end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
db_search->db_blkid = start_blkid;
db_search->db_state = DB_SEARCH;
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
ASSERT3P(db, ==, NULL);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = db_next) {
db_next = AVL_NEXT(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
if (db->db_level != 0 || db->db_blkid > end_blkid) {
break;
}
ASSERT3U(db->db_blkid, >=, start_blkid);
/* found a level 0 buffer in the range */
mutex_enter(&db->db_mtx);
if (dbuf_undirty(db, tx)) {
/* mutex has been dropped and dbuf destroyed */
continue;
}
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
ASSERT(db->db.db_data == NULL);
mutex_exit(&db->db_mtx);
continue;
}
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/* will be handled in dbuf_read_done or dbuf_rele */
db->db_freed_in_flight = TRUE;
mutex_exit(&db->db_mtx);
continue;
}
if (zfs_refcount_count(&db->db_holds) == 0) {
ASSERT(db->db_buf);
dbuf_destroy(db);
continue;
}
/* The dbuf is referenced */
dr = list_head(&db->db_dirty_records);
if (dr != NULL) {
if (dr->dr_txg == txg) {
/*
* This buffer is "in-use", re-adjust the file
* size to reflect that this buffer may
* contain new data when we sync.
*/
if (db->db_blkid != DMU_SPILL_BLKID &&
db->db_blkid > dn->dn_maxblkid)
dn->dn_maxblkid = db->db_blkid;
dbuf_unoverride(dr);
} else {
/*
* This dbuf is not dirty in the open context.
* Either uncache it (if it's not referenced in
* the open context) or reset its contents to
* empty.
*/
dbuf_fix_old_data(db, txg);
}
}
/* clear the contents if it's cached */
if (db->db_state == DB_CACHED) {
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
rw_enter(&db->db_rwlock, RW_WRITER);
bzero(db->db.db_data, db->db.db_size);
rw_exit(&db->db_rwlock);
arc_buf_freeze(db->db_buf);
}
mutex_exit(&db->db_mtx);
}
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
arc_buf_t *buf, *old_buf;
dbuf_dirty_record_t *dr;
int osize = db->db.db_size;
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dnode_t *dn;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* XXX we should be doing a dbuf_read, checking the return
* value and returning that up to our callers
*/
dmu_buf_will_dirty(&db->db, tx);
/* create the data buffer for the new block */
buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
/* copy old block data to the new block */
old_buf = db->db_buf;
bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
bzero((uint8_t *)buf->b_data + osize, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
arc_buf_destroy(old_buf, db);
db->db.db_size = size;
dr = list_head(&db->db_dirty_records);
/* dirty record added by dmu_buf_will_dirty() */
VERIFY(dr != NULL);
if (db->db_level == 0)
dr->dt.dl.dr_data = buf;
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
ASSERT3U(dr->dr_accounted, ==, osize);
dr->dr_accounted = size;
mutex_exit(&db->db_mtx);
dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
objset_t *os __maybe_unused = db->db_objset;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(arc_released(os->os_phys_buf) ||
list_link_active(&os->os_dsl_dataset->ds_synced_link));
ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
(void) arc_release(db->db_buf, db);
}
/*
* We already have a dirty record for this TXG, and we are being
* dirtied again.
*/
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
/*
* If this buffer has already been written out,
* we now need to reset its state.
*/
dbuf_unoverride(dr);
if (db->db.db_object != DMU_META_DNODE_OBJECT &&
db->db_state != DB_NOFILL) {
/* Already released on initial dirty, so just thaw. */
ASSERT(arc_released(db->db_buf));
arc_buf_thaw(db->db_buf);
}
}
}
dbuf_dirty_record_t *
dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
ASSERT(dn->dn_maxblkid >= blkid);
dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
dr->dr_txg = tx->tx_txg;
dr->dt.dll.dr_blkid = blkid;
dr->dr_accounted = dn->dn_datablksz;
/*
* There should not be any dbuf for the block that we're dirtying.
* Otherwise the buffer contents could be inconsistent between the
* dbuf and the lightweight dirty record.
*/
ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid));
mutex_enter(&dn->dn_mtx);
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
}
if (dn->dn_nlevels == 1) {
ASSERT3U(blkid, <, dn->dn_nblkptr);
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
dnode_setdirty(dn, tx);
} else {
mutex_exit(&dn->dn_mtx);
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
1, blkid >> epbs, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (parent_db == NULL) {
kmem_free(dr, sizeof (*dr));
return (NULL);
}
int err = dbuf_read(parent_db, NULL,
(DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err != 0) {
dbuf_rele(parent_db, FTAG);
kmem_free(dr, sizeof (*dr));
return (NULL);
}
dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
dbuf_rele(parent_db, FTAG);
mutex_enter(&parent_dr->dt.di.dr_mtx);
ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
list_insert_tail(&parent_dr->dt.di.dr_children, dr);
mutex_exit(&parent_dr->dt.di.dr_mtx);
dr->dr_parent = parent_dr;
}
dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
return (dr);
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
objset_t *os;
dbuf_dirty_record_t *dr, *dr_next, *dr_head;
int txgoff = tx->tx_txg & TXG_MASK;
boolean_t drop_struct_rwlock = B_FALSE;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
DMU_TX_DIRTY_BUF(tx, db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Shouldn't dirty a regular buffer in syncing context. Private
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
ASSERT(!dmu_tx_is_syncing(tx) ||
BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
dn->dn_objset->os_dsl_dataset == NULL);
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
/*
* We make this assert for private objects as well, but after we
* check if we're already dirty. They are allowed to re-dirty
* in syncing context.
*/
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
* XXX make this true for indirects too? The problem is that
* transactions created with dmu_tx_create_assigned() from
* syncing context don't bother holding ahead.
*/
ASSERT(db->db_level != 0 ||
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
mutex_enter(&dn->dn_mtx);
dnode_set_dirtyctx(dn, tx, db);
if (tx->tx_txg > dn->dn_dirty_txg)
dn->dn_dirty_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
/*
* If this buffer is already dirty, we're done.
*/
dr_head = list_head(&db->db_dirty_records);
ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
db->db.db_object == DMU_META_DNODE_OBJECT);
dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
if (dr_next && dr_next->dr_txg == tx->tx_txg) {
DB_DNODE_EXIT(db);
dbuf_redirty(dr_next);
mutex_exit(&db->db_mtx);
return (dr_next);
}
/*
* Only valid if not already dirty.
*/
ASSERT(dn->dn_object == 0 ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
* We should only be dirtying in syncing context if it's the
* mos or we're initializing the os or it's a special object.
* However, we are allowed to dirty in syncing context provided
* we already dirtied it in open context. Hence we must make
* this assertion only if we're not already dirty.
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef ZFS_DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
ASSERT(db->db.db_size != 0);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
if (db->db_blkid != DMU_BONUS_BLKID) {
dmu_objset_willuse_space(os, db->db.db_size, tx);
}
/*
* If this buffer is dirty in an old transaction group we need
* to make a copy of it so that the changes we make in this
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
list_link_init(&dr->dr_dbuf_node);
dr->dr_dnode = dn;
if (db->db_level == 0) {
void *data_old = db->db_buf;
if (db->db_state != DB_NOFILL) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db.db_data;
} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
/*
* Release the data buffer from the cache so
* that we can modify it without impacting
* possible other users of this cached data
* block. Note that indirect blocks and
* private objects are not released until the
* syncing state (since they are only modified
* then).
*/
arc_release(db->db_buf, db);
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db_buf;
}
ASSERT(data_old != NULL);
}
dr->dt.dl.dr_data = data_old;
} else {
mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
list_create(&dr->dt.di.dr_children,
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
if (db->db_blkid != DMU_BONUS_BLKID)
dr->dr_accounted = db->db.db_size;
dr->dr_dbuf = db;
dr->dr_txg = tx->tx_txg;
list_insert_before(&db->db_dirty_records, dr_next, dr);
/*
* We could have been freed_in_flight between the dbuf_noread
* and dbuf_dirty. We win, as though the dbuf_noread() had
* happened after the free.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff],
db->db_blkid, 1);
}
mutex_exit(&dn->dn_mtx);
db->db_freed_in_flight = FALSE;
}
/*
* This buffer is now part of this txg
*/
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
db->db_dirtycnt += 1;
ASSERT3U(db->db_dirtycnt, <=, 3);
mutex_exit(&db->db_mtx);
if (db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_rwlock = B_TRUE;
}
/*
* If we are overwriting a dedup BP, then unless it is snapshotted,
* when we get to syncing context we will need to decrement its
* refcount in the DDT. Prefetch the relevant DDT block so that
* syncing context won't have to wait for the i/o.
*/
if (db->db_blkptr != NULL) {
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
ddt_prefetch(os->os_spa, db->db_blkptr);
dmu_buf_unlock_parent(db, dblt, FTAG);
}
/*
* We need to hold the dn_struct_rwlock to make this assertion,
* because it protects dn_phys / dn_next_nlevels from changing.
*/
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
dn->dn_phys->dn_nlevels > db->db_level ||
dn->dn_next_nlevels[txgoff] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
if (db->db_level == 0) {
ASSERT(!db->db_objset->os_raw_receive ||
dn->dn_maxblkid >= db->db_blkid);
dnode_new_blkid(dn, db->db_blkid, tx,
drop_struct_rwlock, B_FALSE);
ASSERT(dn->dn_maxblkid >= db->db_blkid);
}
if (db->db_level+1 < dn->dn_nlevels) {
dmu_buf_impl_t *parent = db->db_parent;
dbuf_dirty_record_t *di;
int parent_held = FALSE;
if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, FTAG);
ASSERT(parent != NULL);
parent_held = TRUE;
}
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
ASSERT3U(db->db_level + 1, ==, parent->db_level);
di = dbuf_dirty(parent, tx);
if (parent_held)
dbuf_rele(parent, FTAG);
mutex_enter(&db->db_mtx);
/*
* Since we've dropped the mutex, it's possible that
* dbuf_undirty() might have changed this out from under us.
*/
if (list_head(&db->db_dirty_records) == dr ||
dn->dn_object == DMU_META_DNODE_OBJECT) {
mutex_enter(&di->dt.di.dr_mtx);
ASSERT3U(di->dr_txg, ==, tx->tx_txg);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&di->dt.di.dr_children, dr);
mutex_exit(&di->dt.di.dr_mtx);
dr->dr_parent = di;
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_level + 1 == dn->dn_nlevels);
ASSERT(db->db_blkid < dn->dn_nblkptr);
ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
if (drop_struct_rwlock)
rw_exit(&dn->dn_struct_rwlock);
}
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
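/*
 * Editor's note (illustrative, not part of the upstream change): the logic
 * above propagates a dirty record up the block tree -- a leaf either hangs
 * its record off a freshly dirtied parent indirect block or, at the top
 * level, off the dnode's per-txg dirty list.  The compiled-out sketch below
 * models that recursion with hypothetical "toy" types; none of these names
 * exist in OpenZFS.
 */
#if 0
typedef struct toy_dr {
	list_node_t td_node;
	list_t td_children;
} toy_dr_t;

typedef struct toy_blk {
	struct toy_blk *tb_parent;	/* NULL for a top-level block */
	toy_dr_t *tb_dr;		/* dirty record for this txg, if any */
	list_t *tb_top_list;		/* per-txg list (top level only) */
} toy_blk_t;

static toy_dr_t *
toy_dirty(toy_blk_t *tb)
{
	if (tb->tb_dr != NULL)
		return (tb->tb_dr);		/* already dirty: fast path */

	toy_dr_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
	list_create(&dr->td_children, sizeof (toy_dr_t),
	    offsetof(toy_dr_t, td_node));
	tb->tb_dr = dr;

	if (tb->tb_parent != NULL) {
		/* Dirty the parent indirect first, then hang off of it. */
		toy_dr_t *pdr = toy_dirty(tb->tb_parent);
		list_insert_tail(&pdr->td_children, dr);
	} else {
		/* Top of the tree: the record goes on the per-txg list. */
		list_insert_tail(tb->tb_top_list, dr);
	}
	return (dr);
}
#endif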
static void
dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
if (dr->dt.dl.dr_data != db->db.db_data) {
struct dnode *dn = dr->dr_dnode;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
kmem_free(dr->dt.dl.dr_data, max_bonuslen);
arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
}
db->db_data_pending = NULL;
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
if (dr->dr_dbuf->db_level != 0) {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT3U(db->db_dirtycnt, >, 0);
db->db_dirtycnt -= 1;
}
/*
* Undirty a buffer in the transaction group referenced by the given
* transaction. Return whether this evicted the dbuf.
*/
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
uint64_t txg = tx->tx_txg;
ASSERT(txg != 0);
/*
* Due to our use of dn_nlevels below, this can only be called
* in open context, unless we are operating on the MOS.
* From syncing context, dn_nlevels may be different from the
* dn_nlevels used when the dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* If this buffer is not dirty, we're done.
*/
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL)
return (B_FALSE);
ASSERT(dr->dr_dbuf == db);
dnode_t *dn = dr->dr_dnode;
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
ASSERT(db->db.db_size != 0);
dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
dr->dr_accounted, txg);
list_remove(&db->db_dirty_records, dr);
/*
* Note that there are three places in dbuf_dirty()
* where this dirty record may be put on a list.
* Make sure to do a list_remove corresponding to
* every one of those list_insert calls.
*/
if (dr->dr_parent) {
mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
list_remove(&dr->dr_parent->dt.di.dr_children, dr);
mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
} else if (db->db_blkid == DMU_SPILL_BLKID ||
db->db_level + 1 == dn->dn_nlevels) {
ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
mutex_exit(&dn->dn_mtx);
}
if (db->db_state != DB_NOFILL) {
dbuf_unoverride(dr);
ASSERT(db->db_buf != NULL);
ASSERT(dr->dt.dl.dr_data != NULL);
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
dbuf_destroy(db);
return (B_TRUE);
}
return (B_FALSE);
}
static void
dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(tx->tx_txg != 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
/*
* Quick check for dirtiness. For already dirty blocks, this
* reduces runtime of this function by >90%, and overall performance
* by 50% for some workloads (e.g. file deletion with indirect blocks
* cached).
*/
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
/*
* It's possible that it is already dirty but not cached,
* because there are some calls to dbuf_dirty() that don't
* go through dmu_buf_will_dirty().
*/
if (dr != NULL) {
/* This dbuf is already dirty and cached. */
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return;
}
}
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
flags |= DB_RF_HAVESTRUCT;
DB_DNODE_EXIT(db);
(void) dbuf_read(db, NULL, flags);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
}
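/*
 * Editor's note (illustrative, not part of the upstream change): the quick
 * dirtiness check in dmu_buf_will_dirty_impl() above serves callers that
 * repeatedly re-dirty a cached buffer.  The compiled-out sketch below shows
 * the assumed caller pattern -- hold, declare write intent against an
 * assigned transaction, modify, release.  "example_overwrite_block" is a
 * hypothetical name.
 */
#if 0
static void
example_overwrite_block(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;

	/* dbuf_hold() requires dn_struct_rwlock (see dbuf_hold_impl()). */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return;

	/* Reads the block if needed and records the dirty intent. */
	dmu_buf_will_dirty(&db->db, tx);
	bzero(db->db.db_data, db->db.db_size);

	dbuf_rele(db, FTAG);
}
#endif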
boolean_t
dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
mutex_enter(&db->db_mtx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
mutex_exit(&db->db_mtx);
return (dr != NULL);
}
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_state = DB_NOFILL;
DTRACE_SET_STATE(db, "allocating NOFILL buffer");
dmu_buf_will_fill(db_fake, tx);
}
void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
ASSERT(db->db_level == 0);
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
dmu_tx_private_ok(tx));
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
/*
* This function is effectively the same as dmu_buf_will_dirty(), but
* indicates the caller expects raw encrypted data in the db, and provides
* the crypt params (byteorder, salt, iv, mac) which should be stored in the
* blkptr_t when this dbuf is written. This is only used for blocks of
* dnodes, during raw receive.
*/
void
dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
/*
* dr_has_raw_params is only processed for blocks of dnodes
* (see dbuf_sync_dnode_leaf_crypt()).
*/
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
ASSERT(db->db_objset->os_raw_receive);
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
ASSERT3P(dr, !=, NULL);
dr->dt.dl.dr_has_raw_params = B_TRUE;
dr->dt.dl.dr_byteorder = byteorder;
bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
}
static void
dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
{
struct dirty_leaf *dl;
dbuf_dirty_record_t *dr;
dr = list_head(&db->db_dirty_records);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
dl->dr_overridden_by = *bp;
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
-/* ARGSUSED */
void
dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
+ (void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dbuf_states_t old_state;
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
old_state = db->db_state;
db->db_state = DB_CACHED;
if (old_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
bzero(db->db.db_data, db->db.db_size);
db->db_freed_in_flight = FALSE;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
} else {
DTRACE_SET_STATE(db, "fill done");
}
cv_broadcast(&db->db_changed);
}
mutex_exit(&db->db_mtx);
}
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
struct dirty_leaf *dl;
dmu_object_type_t type;
dbuf_dirty_record_t *dr;
if (etype == BP_EMBEDDED_TYPE_DATA) {
ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
SPA_FEATURE_EMBEDDED_DATA));
}
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
dmu_buf_will_not_fill(dbuf, tx);
dr = list_head(&db->db_dirty_records);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dl = &dr->dt.dl;
encode_embedded_bp_compressed(&dl->dr_overridden_by,
data, comp, uncompressed_size, compressed_size);
BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
BP_SET_TYPE(&dl->dr_overridden_by, type);
BP_SET_LEVEL(&dl->dr_overridden_by, 0);
BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
void
dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
dmu_object_type_t type;
ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
dmu_buf_will_not_fill(dbuf, tx);
blkptr_t bp = { { { {0} } } };
BP_SET_TYPE(&bp, type);
BP_SET_LEVEL(&bp, 0);
BP_SET_BIRTH(&bp, tx->tx_txg, 0);
BP_SET_REDACTED(&bp);
BPE_SET_LSIZE(&bp, dbuf->db_size);
dbuf_override_impl(db, &bp, tx);
}
/*
* Directly assign a provided arc buf to a given dbuf if it's not referenced
* by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
*/
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_level == 0);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
ASSERT(tx->tx_txg != 0);
arc_return_buf(buf, db);
ASSERT(arc_released(buf));
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
if (db->db_state == DB_CACHED &&
zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
/*
* In practice, we will never have a case where we have an
* encrypted arc buffer while additional holds exist on the
* dbuf. We don't handle this here so we simply assert that
* fact instead.
*/
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
bcopy(buf->b_data, db->db.db_data, db->db.db_size);
arc_buf_destroy(buf, db);
return;
}
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
arc_release(db->db_buf, db);
}
dr->dt.dl.dr_data = buf;
arc_buf_destroy(db->db_buf, db);
} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
arc_release(db->db_buf, db);
arc_buf_destroy(db->db_buf, db);
}
db->db_buf = NULL;
}
ASSERT(db->db_buf == NULL);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
DTRACE_SET_STATE(db, "filling assigned arcbuf");
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
dmu_buf_fill_done(&db->db, tx);
}
void
dbuf_destroy(dmu_buf_impl_t *db)
{
dnode_t *dn;
dmu_buf_impl_t *parent = db->db_parent;
dmu_buf_impl_t *dndb;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(zfs_refcount_is_zero(&db->db_holds));
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
if (db->db_blkid == DMU_BONUS_BLKID) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
if (db->db.db_data != NULL) {
kmem_free(db->db.db_data, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "buffer cleared");
}
}
dbuf_clear_data(db);
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_data_pending == NULL);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_state = DB_EVICTING;
DTRACE_SET_STATE(db, "buffer eviction started");
db->db_blkptr = NULL;
/*
* Now that db_state is DB_EVICTING, nobody else can find this via
* the hash table. We can now drop db_mtx, which allows us to
* acquire the dn_dbufs_mtx.
*/
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dndb = dn->dn_dbuf;
if (db->db_blkid != DMU_BONUS_BLKID) {
boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
if (needlock)
mutex_enter_nested(&dn->dn_dbufs_mtx,
NESTED_SINGLE);
avl_remove(&dn->dn_dbufs, db);
membar_producer();
DB_DNODE_EXIT(db);
if (needlock)
mutex_exit(&dn->dn_dbufs_mtx);
/*
* Decrementing the dbuf count means that the hold corresponding
* to the removed dbuf is no longer discounted in dnode_move(),
* so the dnode cannot be moved until after we release the hold.
* The membar_producer() ensures visibility of the decremented
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, db, B_TRUE);
db->db_dnode_handle = NULL;
dbuf_hash_remove(db);
} else {
DB_DNODE_EXIT(db);
}
ASSERT(zfs_refcount_is_zero(&db->db_holds));
db->db_parent = NULL;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
ASSERT(db->db_hash_next == NULL);
ASSERT(db->db_blkptr == NULL);
ASSERT(db->db_data_pending == NULL);
ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
ASSERT(!multilist_link_active(&db->db_cache_link));
kmem_cache_free(dbuf_kmem_cache, db);
arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
/*
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb) {
mutex_enter(&parent->db_mtx);
dbuf_rele_and_unlock(parent, db, B_TRUE);
}
}
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
* this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
*parentp = NULL;
*bpp = NULL;
ASSERT(blkid != DMU_BONUS_BLKID);
if (blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_have_spill &&
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
else
*bpp = NULL;
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
mutex_exit(&dn->dn_mtx);
return (0);
}
int nlevels =
(dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(level * epbs, <, 64);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
/*
* This assertion shouldn't trip as long as the max indirect block size
* is less than 1M. The reason for this is that up to that point,
* the number of levels required to address an entire object with blocks
* of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
* other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
* (i.e. we can address the entire object), objects will all use at most
* N-1 levels and the assertion won't overflow. However, once epbs is
* 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
* enough to address an entire object, so objects will have 5 levels,
* but then this assertion will overflow.
*
* All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
* need to redo this logic to handle overflows.
*/
ASSERT(level >= nlevels ||
((nlevels - level - 1) * epbs) +
highbit64(dn->dn_phys->dn_nblkptr) <= 64);
if (level >= nlevels ||
blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
((nlevels - level - 1) * epbs)) ||
(fail_sparse &&
blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
err = dbuf_hold_impl(dn, level + 1,
blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
if (err)
return (err);
err = dbuf_read(*parentp, NULL,
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err) {
dbuf_rele(*parentp, NULL);
*parentp = NULL;
return (err);
}
rw_enter(&(*parentp)->db_rwlock, RW_READER);
*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
(blkid & ((1ULL << epbs) - 1));
if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
ASSERT(BP_IS_HOLE(*bpp));
rw_exit(&(*parentp)->db_rwlock);
return (0);
} else {
/* the block is referenced from the dnode */
ASSERT3U(level, ==, nlevels-1);
ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
blkid < dn->dn_phys->dn_nblkptr);
if (dn->dn_dbuf) {
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
}
*bpp = &dn->dn_phys->dn_blkptr[blkid];
return (0);
}
}
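/*
 * Editor's note (illustrative, not part of the upstream change): a worked
 * example of the addressing math used above.  With epbs block-pointer bits
 * per indirect block (epbs = dn_indblkshift - SPA_BLKPTRSHIFT), an object
 * with nlevels levels and nblkptr top-level block pointers can address
 * nblkptr << (epbs * (nlevels - 1)) level-0 blocks, which is the bound
 * checked before returning ENOENT.  "example_max_level0_blocks" is a
 * hypothetical helper, compiled out.
 */
#if 0
static uint64_t
example_max_level0_blocks(int nlevels, int nblkptr, int epbs)
{
	/* e.g. nlevels = 3, nblkptr = 3, epbs = 10: 3 << 20 blocks */
	return ((uint64_t)nblkptr << (epbs * (nlevels - 1)));
}
#endif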
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
objset_t *os = dn->dn_objset;
dmu_buf_impl_t *db, *odb;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);
db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dbuf_node));
db->db_objset = os;
db->db.db_object = dn->dn_object;
db->db_level = level;
db->db_blkid = blkid;
db->db_dirtycnt = 0;
db->db_dnode_handle = dn->dn_handle;
db->db_parent = parent;
db->db_blkptr = blkptr;
db->db_user = NULL;
db->db_user_immediate_evict = FALSE;
db->db_freed_in_flight = FALSE;
db->db_pending_evict = FALSE;
if (blkid == DMU_BONUS_BLKID) {
ASSERT3P(parent, ==, dn->dn_dbuf);
db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
db->db.db_offset = DMU_BONUS_BLKID;
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "bonus buffer created");
db->db_caching_status = DB_NO_CACHE;
/* the bonus dbuf is not placed in the hash table */
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
return (db);
} else if (blkid == DMU_SPILL_BLKID) {
db->db.db_size = (blkptr != NULL) ?
BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
db->db.db_offset = 0;
} else {
int blocksize =
db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
db->db.db_size = blocksize;
db->db.db_offset = db->db_blkid * blocksize;
}
/*
* Hold the dn_dbufs_mtx while we insert the new dbuf
* into the hash table *and* add it to the dn_dbufs list.
* This prevents a possible deadlock with someone
* trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
db->db_state = DB_EVICTING; /* not worth logging this state change */
if ((odb = dbuf_hash_insert(db)) != NULL) {
/* someone else inserted it first */
mutex_exit(&dn->dn_dbufs_mtx);
kmem_cache_free(dbuf_kmem_cache, db);
DBUF_STAT_BUMP(hash_insert_race);
return (odb);
}
avl_add(&dn->dn_dbufs, db);
db->db_state = DB_UNCACHED;
DTRACE_SET_STATE(db, "regular buffer created");
db->db_caching_status = DB_NO_CACHE;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
if (parent && parent != dn->dn_dbuf)
dbuf_add_ref(parent, db);
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
zfs_refcount_count(&dn->dn_holds) > 0);
(void) zfs_refcount_add(&dn->dn_holds, db);
dprintf_dbuf(db, "db=%p\n", db);
return (db);
}
/*
* This function returns a block pointer and information about the object,
* given a dnode and a block. This is a publicly accessible version of
* dbuf_findbp that only returns some information, rather than the
* dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
* should be locked as (at least) a reader.
*/
int
dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
{
dmu_buf_impl_t *dbp = NULL;
blkptr_t *bp2;
int err = 0;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
if (err == 0) {
*bp = *bp2;
if (dbp != NULL)
dbuf_rele(dbp, NULL);
if (datablkszsec != NULL)
*datablkszsec = dn->dn_phys->dn_datablkszsec;
if (indblkshift != NULL)
*indblkshift = dn->dn_phys->dn_indblkshift;
}
return (err);
}
typedef struct dbuf_prefetch_arg {
spa_t *dpa_spa; /* The spa to issue the prefetch in. */
zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
int dpa_curlevel; /* The current level that we're reading */
dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
void *dpa_arg; /* prefetch completion arg */
} dbuf_prefetch_arg_t;
static void
dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
{
if (dpa->dpa_cb != NULL)
dpa->dpa_cb(dpa->dpa_arg, io_done);
kmem_free(dpa, sizeof (*dpa));
}
static void
dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
+ (void) zio, (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
dbuf_prefetch_fini(dpa, B_TRUE);
if (abuf != NULL)
arc_buf_destroy(abuf, private);
}
/*
* Actually issue the prefetch read for the block given.
*/
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (dbuf_prefetch_fini(dpa, B_FALSE));
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags =
dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
ARC_FLAG_NO_BUF;
/* dnodes are always read as raw and then converted later */
if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
dpa->dpa_curlevel == 0)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
ASSERT(dpa->dpa_zio != NULL);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
dbuf_issue_final_prefetch_done, dpa,
dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
}
/*
* Called when an indirect block above our prefetch target is read in. This
* will either read in the next indirect block down the tree or issue the actual
* prefetch if the next block down is our target.
*/
static void
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
+ (void) zb, (void) iobp;
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
ASSERT3S(dpa->dpa_curlevel, >, 0);
if (abuf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
return (dbuf_prefetch_fini(dpa, B_TRUE));
}
ASSERT(zio == NULL || zio->io_error == 0);
/*
* The dpa_dnode is only valid if we are called with a NULL
* zio. This indicates that the arc_read() returned without
* first calling zio_read() to issue a physical read. Once
* a physical read is made the dpa_dnode must be invalidated
* as the locks guarding it may have been dropped. If the
* dpa_dnode is still valid, then we want to add it to the dbuf
* cache. To do so, we must hold the dbuf associated with the block
* we just prefetched, read its contents so that we associate it
* with an arc_buf_t, and then release it.
*/
if (zio != NULL) {
ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
} else {
ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
}
ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
dpa->dpa_dnode = NULL;
} else if (dpa->dpa_dnode != NULL) {
uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel -
dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG);
if (db == NULL) {
arc_buf_destroy(abuf, private);
return (dbuf_prefetch_fini(dpa, B_TRUE));
}
(void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG);
}
dpa->dpa_curlevel--;
uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
ASSERT(!BP_IS_REDACTED(bp) ||
dsl_dataset_feature_is_active(
dpa->dpa_dnode->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
dbuf_prefetch_fini(dpa, B_TRUE);
} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
dbuf_issue_final_prefetch(dpa, bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
arc_buf_destroy(abuf, private);
}
/*
* Issue prefetch reads for the given block on the given level. If the indirect
* blocks above that block are not in memory, we will read them in
* asynchronously. As a result, this call never blocks waiting for a read to
* complete. Note that the prefetch might fail if the dataset is encrypted and
* the encryption key is unmapped before the IO completes.
*/
int
dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
void *arg)
{
blkptr_t bp;
int epbs, nlevels, curlevel;
uint64_t curblkid;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
goto no_issue;
if (level == 0 && dnode_block_freed(dn, blkid))
goto no_issue;
/*
* This dnode hasn't been written to disk yet, so there's nothing to
* prefetch.
*/
nlevels = dn->dn_phys->dn_nlevels;
if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
goto no_issue;
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
goto no_issue;
dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
level, blkid);
if (db != NULL) {
mutex_exit(&db->db_mtx);
/*
* This dbuf already exists. It is either CACHED, or
* (we assume) about to be read or filled.
*/
goto no_issue;
}
/*
* Find the closest ancestor (indirect block) of the target block
* that is present in the cache. In this indirect block, we will
* find the bp that is at curlevel, curblkid.
*/
curlevel = level;
curblkid = blkid;
while (curlevel < nlevels - 1) {
int parent_level = curlevel + 1;
uint64_t parent_blkid = curblkid >> epbs;
dmu_buf_impl_t *db;
if (dbuf_hold_impl(dn, parent_level, parent_blkid,
FALSE, TRUE, FTAG, &db) == 0) {
blkptr_t *bpp = db->db_buf->b_data;
bp = bpp[P2PHASE(curblkid, 1 << epbs)];
dbuf_rele(db, FTAG);
break;
}
curlevel = parent_level;
curblkid = parent_blkid;
}
if (curlevel == nlevels - 1) {
/* No cached indirect blocks found. */
ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
bp = dn->dn_phys->dn_blkptr[curblkid];
}
ASSERT(!BP_IS_REDACTED(&bp) ||
dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
SPA_FEATURE_REDACTED_DATASETS));
if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
goto no_issue;
ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
ZIO_FLAG_CANFAIL);
dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, level, blkid);
dpa->dpa_curlevel = curlevel;
dpa->dpa_prio = prio;
dpa->dpa_aflags = aflags;
dpa->dpa_spa = dn->dn_objset->os_spa;
dpa->dpa_dnode = dn;
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
dpa->dpa_cb = cb;
dpa->dpa_arg = arg;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
* a higher level, though, we want to issue the prefetches for all the
* indirect blocks asynchronously, so we can go on with whatever we were
* doing.
*/
if (curlevel == level) {
ASSERT3U(curblkid, ==, blkid);
dbuf_issue_final_prefetch(dpa, &bp);
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
&bp, dbuf_prefetch_indirect_done, dpa, prio,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
/*
* We use pio here instead of dpa_zio since it's possible that
* dpa may have already been freed.
*/
zio_nowait(pio);
return (1);
no_issue:
if (cb != NULL)
cb(arg, B_FALSE);
return (0);
}
int
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t aflags)
{
return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
}
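/*
 * Editor's note (illustrative, not part of the upstream change): the comments
 * in dbuf_prefetch_impl() above describe walking down from the closest cached
 * indirect block and issuing the remaining reads asynchronously.  A caller
 * only needs the dnode held with dn_struct_rwlock as reader, as sketched in
 * the compiled-out example below ("example_prefetch_range" is hypothetical;
 * the priority and ARC flag are assumed values).
 */
#if 0
static void
example_prefetch_range(dnode_t *dn, uint64_t start, uint64_t nblks)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	for (uint64_t blkid = start; blkid < start + nblks; blkid++) {
		/* Never blocks; returns 1 if a prefetch was issued. */
		(void) dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ,
		    ARC_FLAG_PREDICTIVE_PREFETCH);
	}
	rw_exit(&dn->dn_struct_rwlock);
}
#endif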
/*
* Helper function for dbuf_hold_impl() to copy a buffer. Handles
* the case of encrypted, compressed and uncompressed buffers by
* allocating the new buffer, respectively, with arc_alloc_raw_buf(),
* arc_alloc_compressed_buf() or arc_alloc_buf().
*
* NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
*/
noinline static void
dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
{
dbuf_dirty_record_t *dr = db->db_data_pending;
arc_buf_t *data = dr->dt.dl.dr_data;
enum zio_compress compress_type = arc_get_compression(data);
uint8_t complevel = arc_get_complevel(data);
if (arc_is_encrypted(data)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(data, &byteorder, salt, iv, mac);
dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
compress_type, complevel));
} else if (compress_type != ZIO_COMPRESS_OFF) {
dbuf_set_data(db, arc_alloc_compressed_buf(
dn->dn_objset->os_spa, db, arc_buf_size(data),
arc_buf_lsize(data), compress_type, complevel));
} else {
dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
rw_enter(&db->db_rwlock, RW_WRITER);
bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
rw_exit(&db->db_rwlock);
}
/*
* Returns with db_holds incremented, and db_mtx not held.
* Note: dn_struct_rwlock must be held.
*/
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp)
{
dmu_buf_impl_t *db, *parent = NULL;
/* If the pool has been created, verify the tx_sync_lock is not held */
spa_t *spa = dn->dn_objset->os_spa;
dsl_pool_t *dp = spa->spa_dsl_pool;
if (dp != NULL) {
ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
}
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT3U(dn->dn_nlevels, >, level);
*dbp = NULL;
/* dbuf_find() returns with db_mtx held */
db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
if (db == NULL) {
blkptr_t *bp = NULL;
int err;
if (fail_uncached)
return (SET_ERROR(ENOENT));
ASSERT3P(parent, ==, NULL);
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
err = SET_ERROR(ENOENT);
if (err) {
if (parent)
dbuf_rele(parent, NULL);
return (err);
}
}
if (err && err != ENOENT)
return (err);
db = dbuf_create(dn, level, blkid, parent, bp);
}
if (fail_uncached && db->db_state != DB_CACHED) {
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
if (db->db_buf != NULL) {
arc_buf_access(db->db_buf);
ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
}
ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
/*
* If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
db->db_state == DB_CACHED && db->db_data_pending) {
dbuf_dirty_record_t *dr = db->db_data_pending;
if (dr->dt.dl.dr_data == db->db_buf)
dbuf_hold_copy(dn, db);
}
if (multilist_link_active(&db->db_cache_link)) {
ASSERT(zfs_refcount_is_zero(&db->db_holds));
ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
(void) zfs_refcount_remove_many(
&dbuf_caches[db->db_caching_status].size,
db->db.db_size, db);
if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMPDOWN(metadata_cache_count);
} else {
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
db->db_caching_status = DB_NO_CACHE;
}
(void) zfs_refcount_add(&db->db_holds, tag);
DBUF_VERIFY(db);
mutex_exit(&db->db_mtx);
/* NOTE: we can't rele the parent until after we drop the db_mtx */
if (parent)
dbuf_rele(parent, NULL);
ASSERT3P(DB_DNODE(db), ==, dn);
ASSERT3U(db->db_blkid, ==, blkid);
ASSERT3U(db->db_level, ==, level);
*dbp = db;
return (0);
}
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_bonus == NULL);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
if (db->db_blkid != DMU_SPILL_BLKID)
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
dbuf_new_size(db, blksz, tx);
return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
int64_t holds = zfs_refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
}
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
boolean_t result = B_FALSE;
if (blkid == DMU_BONUS_BLKID)
found_db = dbuf_find_bonus(os, obj);
else
found_db = dbuf_find(os, obj, 0, blkid);
if (found_db != NULL) {
if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
(void) zfs_refcount_add(&db->db_holds, tag);
result = B_TRUE;
}
mutex_exit(&found_db->db_mtx);
}
return (result);
}
/*
* If you call dbuf_rele() you had better not be referencing the dnode handle
* unless you have some other direct or indirect hold on the dnode. (An indirect
* hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
* Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
* dnode's parent dbuf evicting its dnode handles.
*/
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
}
void
dmu_buf_rele(dmu_buf_t *db, void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically. The 'evicting'
* argument should be set if we are already in the dbuf-evicting code
* path, in which case we don't want to recursively evict. This allows us to
* avoid deeply nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
{
int64_t holds;
uint64_t size;
ASSERT(MUTEX_HELD(&db->db_mtx));
DBUF_VERIFY(db);
/*
* Remove the reference to the dbuf before removing its hold on the
* dnode so we can guarantee in dnode_move() that a referenced bonus
* buffer has a corresponding dnode hold.
*/
holds = zfs_refcount_remove(&db->db_holds, tag);
ASSERT(holds >= 0);
/*
* We can't freeze indirects if there is a possibility that they
* may be modified in the current syncing context.
*/
if (db->db_buf != NULL &&
holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
arc_buf_freeze(db->db_buf);
}
if (holds == db->db_dirtycnt &&
db->db_level == 0 && db->db_user_immediate_evict)
dbuf_evict_user(db);
if (holds == 0) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn;
boolean_t evict_dbuf = db->db_pending_evict;
/*
* If the dnode moves here, we cannot cross this
* barrier until the move completes.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
atomic_dec_32(&dn->dn_dbufs_count);
/*
* Decrementing the dbuf count means that the bonus
* buffer's dnode hold is no longer discounted in
* dnode_move(). The dnode cannot move until after
* the dnode_rele() below.
*/
DB_DNODE_EXIT(db);
/*
* Do not reference db after its lock is dropped.
* Another thread may evict it.
*/
mutex_exit(&db->db_mtx);
if (evict_dbuf)
dnode_evict_bonus(dn);
dnode_rele(dn, db);
} else if (db->db_buf == NULL) {
/*
* This is a special case: we never associated this
* dbuf with any data allocated from the ARC.
*/
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
dbuf_destroy(db);
} else if (arc_released(db->db_buf)) {
/*
* This dbuf has anonymous data associated with it.
*/
dbuf_destroy(db);
} else {
boolean_t do_arc_evict = B_FALSE;
blkptr_t bp;
spa_t *spa = dmu_objset_spa(db->db_objset);
if (!DBUF_IS_CACHEABLE(db) &&
db->db_blkptr != NULL &&
!BP_IS_HOLE(db->db_blkptr) &&
!BP_IS_EMBEDDED(db->db_blkptr)) {
do_arc_evict = B_TRUE;
bp = *db->db_blkptr;
}
if (!DBUF_IS_CACHEABLE(db) ||
db->db_pending_evict) {
dbuf_destroy(db);
} else if (!multilist_link_active(&db->db_cache_link)) {
ASSERT3U(db->db_caching_status, ==,
DB_NO_CACHE);
dbuf_cached_state_t dcs =
dbuf_include_in_metadata_cache(db) ?
DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
db->db_caching_status = dcs;
multilist_insert(&dbuf_caches[dcs].cache, db);
uint64_t db_size = db->db.db_size;
size = zfs_refcount_add_many(
&dbuf_caches[dcs].size, db_size, db);
uint8_t db_level = db->db_level;
mutex_exit(&db->db_mtx);
if (dcs == DB_DBUF_METADATA_CACHE) {
DBUF_STAT_BUMP(metadata_cache_count);
DBUF_STAT_MAX(
metadata_cache_size_bytes_max,
size);
} else {
DBUF_STAT_BUMP(cache_count);
DBUF_STAT_MAX(cache_size_bytes_max,
size);
DBUF_STAT_BUMP(cache_levels[db_level]);
DBUF_STAT_INCR(
cache_levels_bytes[db_level],
db_size);
}
if (dcs == DB_DBUF_CACHE && !evicting)
dbuf_evict_notify(size);
}
if (do_arc_evict)
arc_freed(spa, &bp);
}
} else {
mutex_exit(&db->db_mtx);
}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
return (zfs_refcount_count(&db->db_holds));
}
uint64_t
dmu_buf_user_refcount(dmu_buf_t *db_fake)
{
uint64_t holds;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
mutex_exit(&db->db_mtx);
return (holds);
}
void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
dmu_buf_user_t *new_user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
dbuf_verify_user(db, DBVU_NOT_EVICTING);
if (db->db_user == old_user)
db->db_user = new_user;
else
old_user = db->db_user;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
mutex_exit(&db->db_mtx);
return (old_user);
}
void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, NULL, user));
}
void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_user_immediate_evict = TRUE;
return (dmu_buf_set_user(db_fake, user));
}
void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, user, NULL));
}
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
return (db->db_user);
}
void
dmu_buf_user_evict_wait()
{
taskq_wait(dbu_evict_taskq);
}
blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_blkptr);
}
objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_objset);
}
dnode_t *
dmu_buf_dnode_enter(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_ENTER(dbi);
return (DB_DNODE(dbi));
}
void
dmu_buf_dnode_exit(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_EXIT(dbi);
}
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
/* ASSERT(dmu_tx_is_syncing(tx)) */
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_blkptr != NULL)
return;
if (db->db_blkid == DMU_SPILL_BLKID) {
db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
BP_ZERO(db->db_blkptr);
return;
}
if (db->db_level == dn->dn_phys->dn_nlevels-1) {
/*
* This buffer was allocated at a time when there were
* no available blkptrs from the dnode, or it was
* inappropriate to hook it in (i.e., nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
} else {
dmu_buf_impl_t *parent = db->db_parent;
int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT(dn->dn_phys->dn_nlevels > 1);
if (parent == NULL) {
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, db);
rw_exit(&dn->dn_struct_rwlock);
mutex_enter(&db->db_mtx);
db->db_parent = parent;
}
db->db_blkptr = (blkptr_t *)parent->db.db_data +
(db->db_blkid & ((1ULL << epbs) - 1));
DBUF_VERIFY(db);
}
}
static void
dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
void *data = dr->dt.dl.dr_data;
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_blkid == DMU_BONUS_BLKID);
ASSERT(data != NULL);
dnode_t *dn = dr->dr_dnode;
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys));
dbuf_sync_leaf_verify_bonus_dnode(dr);
dbuf_undirty_bonus(dr);
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
}
/*
* When syncing out a block of dnodes, adjust the block to deal with
* encryption. Normally, we make sure the block is decrypted before writing
* it. If we have crypt params, then we are writing a raw (encrypted) block,
* from a raw receive. In this case, set the ARC buf's crypt params so
* that the BP will be filled with the correct byteorder, salt, iv, and mac.
*/
static void
dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
{
int err;
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
/*
* Unfortunately, there is currently no mechanism for
* syncing context to handle decryption errors. An error
* here is only possible if an attacker maliciously
* changed a dnode block and updated the associated
* checksums going up the block tree.
*/
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
err = arc_untransform(db->db_buf, db->db_objset->os_spa,
&zb, B_TRUE);
if (err)
panic("Invalid dnode block MAC");
} else if (dr->dt.dl.dr_has_raw_params) {
(void) arc_release(dr->dt.dl.dr_data, db);
arc_convert_to_raw(dr->dt.dl.dr_data,
dmu_objset_id(db->db_objset),
dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
}
}
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
* is critical that we not allow the compiler to inline this function into
* dbuf_sync_list(), thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
ASSERT(db->db_level > 0);
DBUF_VERIFY(db);
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
ASSERT3U(db->db_state, ==, DB_CACHED);
ASSERT(db->db_buf != NULL);
/* Indirect block size must match what the dnode thinks it is. */
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
dbuf_check_blkptr(dn, db);
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, db->db_buf, tx);
zio_t *zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
/*
* Verify that the size of the data in our bonus buffer does not exceed
* its recorded size.
*
* The purpose of this verification is to catch any cases in development
* where the size of a phys structure (e.g., space_map_phys_t) grows and,
* due to incorrect feature management, older pools expect to read more
* data even though they didn't actually write it to begin with.
*
* For example, this would catch an error in the feature logic where we
* open an older pool and we expect to write the space map histogram of
* a space map with size SPACE_MAP_SIZE_V0.
*/
static void
dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
{
#ifdef ZFS_DEBUG
dnode_t *dn = dr->dr_dnode;
/*
* Encrypted bonus buffers can have data past their bonuslen.
* Skip the verification of these blocks.
*/
if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
return;
uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(bonuslen, <=, maxbonuslen);
arc_buf_t *datap = dr->dt.dl.dr_data;
char *datap_end = ((char *)datap) + bonuslen;
char *datap_max = ((char *)datap) + maxbonuslen;
/* ensure that everything is zero after our data */
for (; datap_end < datap_max; datap_end++)
ASSERT(*datap_end == 0);
#endif
}
static blkptr_t *
dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
{
/* This must be a lightweight dirty record. */
ASSERT3P(dr->dr_dbuf, ==, NULL);
dnode_t *dn = dr->dr_dnode;
if (dn->dn_phys->dn_nlevels == 1) {
VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
} else {
dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
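/*
 * epbs is log2(block pointers per indirect block). As a worked example
 * (assuming the common 128K indirect block, dn_indblkshift = 17, and
 * SPA_BLKPTRSHIFT = 7): epbs = 10, so each L1 indirect covers 1024 L0
 * blocks, and blkid 2050 lives at slot 2050 & 1023 = 2 of the L1 dbuf
 * whose db_blkid is 2050 >> 10 = 2.
 */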
VERIFY3U(parent_db->db_level, ==, 1);
VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
blkptr_t *bp = parent_db->db.db_data;
return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
}
}
static void
dbuf_lightweight_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
blkptr_t *bp = zio->io_bp;
if (zio->io_error != 0)
return;
dnode_t *dn = dr->dr_dnode;
blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
spa_t *spa = dmu_objset_spa(dn->dn_objset);
int64_t delta = bp_get_dsize_sync(spa, bp) -
bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta);
uint64_t blkid = dr->dt.dll.dr_blkid;
mutex_enter(&dn->dn_mtx);
if (blkid > dn->dn_phys->dn_maxblkid) {
ASSERT0(dn->dn_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = blkid;
}
mutex_exit(&dn->dn_mtx);
if (!BP_IS_EMBEDDED(bp)) {
uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
BP_SET_FILL(bp, fill);
}
dmu_buf_impl_t *parent_db;
EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
if (dr->dr_parent == NULL) {
parent_db = dn->dn_dbuf;
} else {
parent_db = dr->dr_parent->dr_dbuf;
}
rw_enter(&parent_db->db_rwlock, RW_WRITER);
*bp_orig = *bp;
rw_exit(&parent_db->db_rwlock);
}
static void
dbuf_lightweight_physdone(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
ASSERT3U(dr->dr_txg, ==, zio->io_txg);
/*
* The callback will be called io_phys_children times. Retire one
* portion of our dirty space each time we are called. Any rounding
* error will be cleaned up by dbuf_lightweight_done().
*/
int delta = dr->dr_accounted / zio->io_phys_children;
dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
static void
dbuf_lightweight_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
VERIFY0(zio->io_error);
objset_t *os = dr->dr_dnode->dn_objset;
dmu_tx_t *tx = os->os_synctx;
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, zio->io_bp, tx);
}
/*
* See comment in dbuf_write_done().
*/
if (zio->io_phys_children == 0) {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted, zio->io_txg);
} else {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted % zio->io_phys_children, zio->io_txg);
}
abd_free(dr->dt.dll.dr_abd);
kmem_free(dr, sizeof (*dr));
}
noinline static void
dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dnode_t *dn = dr->dr_dnode;
zio_t *pio;
if (dn->dn_phys->dn_nlevels == 1) {
pio = dn->dn_zio;
} else {
pio = dr->dr_parent->dr_zio;
}
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(dn->dn_objset),
.zb_object = dn->dn_object,
.zb_level = 0,
.zb_blkid = dr->dt.dll.dr_blkid,
};
/*
* See comment in dbuf_write(). This is so that zio->io_bp_orig
* will have the old BP in dbuf_lightweight_done().
*/
dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
&dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
zio_nowait(dr->dr_zio);
}
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
* critical that we not allow the compiler to inline this function into
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
uint64_t txg = tx->tx_txg;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
/*
* To be synced, we must be dirtied. But we
* might have been freed after being dirtied.
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
ASSERT(db->db.db_data == NULL);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
}
DBUF_VERIFY(db);
if (db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
/*
* In the previous transaction group, the bonus buffer
* was entirely used to store the attributes for the
* dnode which overrode the dn_spill field. However,
* when adding more attributes to the file a spill
* block was required to hold the extra attributes.
*
* Make sure to clear the garbage left in the dn_spill
* field from the previous attributes in the bonus
* buffer. Otherwise, after writing out the spill
* block to the newly allocated dva, it will free
* the old block pointed to by the invalid dn_spill.
*/
db->db_blkptr = NULL;
}
dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/*
* If this is a bonus buffer, simply copy the bonus data into the
* dnode. It will be written out when the dnode is synced (and it
* will be synced, since it must have been dirty for dbuf_sync to
* be called).
*/
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dr->dr_dbuf == db);
dbuf_sync_bonus(dr, tx);
return;
}
os = dn->dn_objset;
/*
* This function may have dropped the db_mtx lock allowing a dmu_sync
* operation to sneak in. As a result, we need to ensure that we
* don't check the dr_override_state until we have returned from
* dbuf_check_blkptr.
*/
dbuf_check_blkptr(dn, db);
/*
* If this buffer is in the middle of an immediate write,
* wait for the synchronous IO to complete.
*/
while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
}
/*
* If this is a dnode block, ensure it is appropriately encrypted
* or decrypted, depending on what we are writing to it this txg.
*/
if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
dbuf_prepare_encrypted_dnode_leaf(dr);
if (db->db_state != DB_NOFILL &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
zfs_refcount_count(&db->db_holds) > 1 &&
dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
*datap == db->db_buf) {
/*
* If this buffer is currently "in use" (i.e., there
* are active holds and db_data still references it),
* then make a copy before we start the write so that
* any modifications from the open txg will not leak
* into this write.
*
* NOTE: this copy does not need to be made for
* objects only modified in the syncing context (e.g.
* DNODE blocks).
*/
int psize = arc_buf_size(*datap);
int lsize = arc_buf_lsize(*datap);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
enum zio_compress compress_type = arc_get_compression(*datap);
uint8_t complevel = arc_get_complevel(*datap);
if (arc_is_encrypted(*datap)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
*datap = arc_alloc_raw_buf(os->os_spa, db,
dmu_objset_id(os), byteorder, salt, iv, mac,
dn->dn_type, psize, lsize, compress_type,
complevel);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type, complevel);
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
bcopy(db->db.db_data, (*datap)->b_data, psize);
}
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
} else {
zio_nowait(dr->dr_zio);
}
}
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
* are processing the meta-dnode, and we have finished.
* The dbufs for all dnodes are put back on the list
* during processing, so that we can zio_wait()
* these IOs after initiating all child IOs.
*/
ASSERT3U(dr->dr_dbuf->db.db_object, ==,
DMU_META_DNODE_OBJECT);
break;
}
list_remove(list, dr);
if (dr->dr_dbuf == NULL) {
dbuf_sync_lightweight(dr, tx);
} else {
if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
VERIFY3U(dr->dr_dbuf->db_level, ==, level);
}
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
}
-/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
+ (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
spa_t *spa = zio->io_spa;
int64_t delta;
uint64_t fill = 0;
int i;
ASSERT3P(db->db_blkptr, !=, NULL);
ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
zio->io_prev_space_delta = delta;
if (bp->blk_birth != 0) {
ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_type) ||
(db->db_blkid == DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_bonustype) ||
BP_IS_EMBEDDED(bp));
ASSERT(BP_GET_LEVEL(bp) == db->db_level);
}
mutex_enter(&db->db_mtx);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(bp)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
mutex_enter(&dn->dn_mtx);
if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
db->db_blkid != DMU_SPILL_BLKID) {
ASSERT0(db->db_objset->os_raw_receive);
dn->dn_phys->dn_maxblkid = db->db_blkid;
}
mutex_exit(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_DNODE) {
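/*
 * Count the dnodes allocated in this block. Each dnode occupies at
 * least one DNODE_MIN_SIZE (512-byte) slot; a large dnode with, for
 * example, dn_extra_slots == 1 spans two slots, so the scan skips its
 * extra slot rather than counting it twice.
 */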
i = 0;
while (i < db->db.db_size) {
dnode_phys_t *dnp =
(void *)(((char *)db->db.db_data) + i);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE) {
fill++;
i += dnp->dn_extra_slots *
DNODE_MIN_SIZE;
}
}
} else {
if (BP_IS_HOLE(bp)) {
fill = 0;
} else {
fill = 1;
}
}
} else {
blkptr_t *ibp = db->db.db_data;
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
if (BP_IS_HOLE(ibp))
continue;
fill += BP_GET_FILL(ibp);
}
}
DB_DNODE_EXIT(db);
if (!BP_IS_EMBEDDED(bp))
BP_SET_FILL(bp, fill);
mutex_exit(&db->db_mtx);
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
*db->db_blkptr = *bp;
dmu_buf_unlock_parent(db, dblt, FTAG);
}
-/* ARGSUSED */
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block comprised of only
* holes, then we want this indirect to be compressed away to a hole. In
* order to do that we must zero out any information about the holes that
* this indirect points to before we try to compress it.
*/
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
+ (void) zio, (void) buf;
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp;
unsigned int epbs, i;
ASSERT3U(db->db_level, >, 0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
/* Determine if all our children are holes */
for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
if (!BP_IS_HOLE(bp))
break;
}
/*
* If all the children are holes, then zero them all out so that
* we may get compressed away.
*/
if (i == 1ULL << epbs) {
/*
* We only found holes. Grab the rwlock to prevent
* anybody from reading the blocks we're about to
* zero out.
*/
rw_enter(&db->db_rwlock, RW_WRITER);
bzero(db->db.db_data, db->db.db_size);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
}
/*
* The SPA will call this callback several times for each zio - once
* for every physical child i/o (zio->io_phys_children times). This
* allows the DMU to monitor the progress of each logical i/o. For example,
* there may be 2 copies of an indirect block, or many fragments of a RAID-Z
* block. There may be a long delay before all copies/fragments are completed,
* so this callback allows us to retire dirty space gradually, as the physical
* i/os complete.
*/
-/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
+ (void) buf;
dmu_buf_impl_t *db = arg;
objset_t *os = db->db_objset;
dsl_pool_t *dp = dmu_objset_pool(os);
dbuf_dirty_record_t *dr;
int delta = 0;
dr = db->db_data_pending;
ASSERT3U(dr->dr_txg, ==, zio->io_txg);
/*
* The callback will be called io_phys_children times. Retire one
* portion of our dirty space each time we are called. Any rounding
* error will be cleaned up by dbuf_write_done().
*/
delta = dr->dr_accounted / zio->io_phys_children;
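/*
 * Worked example (hypothetical numbers): if dr_accounted were 131072
 * bytes and io_phys_children were 3 (e.g. three DVAs), each call here
 * would retire 131072 / 3 = 43690 bytes and dbuf_write_done() would
 * retire the remaining 131072 % 3 = 2 bytes.
 */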
dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
-/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
+ (void) buf;
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
objset_t *os = db->db_objset;
dmu_tx_t *tx = os->os_synctx;
ASSERT0(zio->io_error);
ASSERT(db->db_blkptr == bp);
/*
* For nopwrites and rewrites we ensure that the bp matches our
* original and bypass all the accounting.
*/
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
dbuf_dirty_record_t *dr = db->db_data_pending;
dnode_t *dn = dr->dr_dnode;
ASSERT(!list_link_active(&dr->dr_dirty_node));
ASSERT(dr->dr_dbuf == db);
ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
list_remove(&db->db_dirty_records, dr);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
if (db->db_state != DB_NOFILL) {
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
} else {
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
SPA_BLKPTRSHIFT;
ASSERT3U(db->db_blkid, <=,
dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
}
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
cv_broadcast(&db->db_changed);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
/*
* If we didn't do a physical write in this ZIO and we
* still ended up here, it means that the space of the
* dbuf that we just released (and undirtied) above hasn't
* been marked as undirtied in the pool's accounting.
*
* Thus, we undirty that space in the pool's view of the
* world here. For physical writes this type of update
* happens in dbuf_write_physdone().
*
* If we did a physical write, cleanup any rounding errors
* that came up due to writing multiple copies of a block
* on disk [see dbuf_write_physdone()].
*/
if (zio->io_phys_children == 0) {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted, zio->io_txg);
} else {
dsl_pool_undirty_space(dmu_objset_pool(os),
dr->dr_accounted % zio->io_phys_children, zio->io_txg);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
}
static void
dbuf_write_nofill_ready(zio_t *zio)
{
dbuf_write_ready(zio, NULL, zio->io_private);
}
static void
dbuf_write_nofill_done(zio_t *zio)
{
dbuf_write_done(zio, NULL, zio->io_private);
}
static void
dbuf_write_override_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
dbuf_write_ready(zio, NULL, db);
}
static void
dbuf_write_override_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
mutex_enter(&db->db_mtx);
if (!BP_EQUAL(zio->io_bp, obp)) {
if (!BP_IS_HOLE(obp))
dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
arc_release(dr->dt.dl.dr_data, db);
}
mutex_exit(&db->db_mtx);
dbuf_write_done(zio, NULL, db);
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
typedef struct dbuf_remap_impl_callback_arg {
objset_t *drica_os;
uint64_t drica_blk_birth;
dmu_tx_t *drica_tx;
} dbuf_remap_impl_callback_arg_t;
static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg)
{
dbuf_remap_impl_callback_arg_t *drica = arg;
objset_t *os = drica->drica_os;
spa_t *spa = dmu_objset_spa(os);
dmu_tx_t *tx = drica->drica_tx;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (os == spa_meta_objset(spa)) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
size, drica->drica_blk_birth, tx);
}
}
static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
blkptr_t bp_copy = *bp;
spa_t *spa = dmu_objset_spa(dn->dn_objset);
dbuf_remap_impl_callback_arg_t drica;
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
drica.drica_os = dn->dn_objset;
drica.drica_blk_birth = bp->blk_birth;
drica.drica_tx = tx;
if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
&drica)) {
/*
* If the blkptr being remapped is tracked by a livelist,
* then we need to make sure the livelist reflects the update.
* First, cancel out the old blkptr by appending a 'FREE'
* entry. Next, add an 'ALLOC' to track the new version. This
* way we avoid trying to free an inaccurate blkptr at delete.
* Note that embedded blkptrs are not tracked in livelists.
*/
if (dn->dn_objset != spa_meta_objset(spa)) {
dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg) {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees,
bp);
bplist_append(&ds->ds_dir->dd_pending_allocs,
&bp_copy);
}
}
/*
* The db_rwlock prevents dbuf_read_impl() from
* dereferencing the BP while we are changing it. To
* avoid lock contention, only grab it when we are actually
* changing the BP.
*/
if (rw != NULL)
rw_enter(rw, RW_WRITER);
*bp = bp_copy;
if (rw != NULL)
rw_exit(rw);
}
}
/*
* Remap any existing BP's to concrete vdevs, if possible.
*/
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(db->db_objset);
ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
return;
if (db->db_level > 0) {
blkptr_t *bp = db->db.db_data;
for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
}
} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
dnode_phys_t *dnp = db->db.db_data;
ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
DMU_OT_DNODE);
for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
i += dnp[i].dn_extra_slots + 1) {
for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
&dn->dn_dbuf->db_rwlock);
dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
tx);
}
}
}
}
/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn = dr->dr_dnode;
objset_t *os;
dmu_buf_impl_t *parent = db->db_parent;
uint64_t txg = tx->tx_txg;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *pio; /* parent I/O */
int wp_flag = 0;
ASSERT(dmu_tx_is_syncing(tx));
os = dn->dn_objset;
if (db->db_state != DB_NOFILL) {
if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
/*
* Private object buffers are released here rather
* than in dbuf_dirty() since they are only modified
* in the syncing context and we don't want the
* overhead of making multiple copies of the data.
*/
if (BP_IS_HOLE(db->db_blkptr)) {
arc_buf_thaw(data);
} else {
dbuf_release_bp(db);
}
dbuf_remap(dn, db, tx);
}
}
if (parent != dn->dn_dbuf) {
/* Our parent is an indirect block. */
/* We have a dirty parent that has been scheduled for write. */
ASSERT(parent && parent->db_data_pending);
/* Our parent's buffer is one level closer to the dnode. */
ASSERT(db->db_level == parent->db_level-1);
/*
* We're about to modify our parent's db_data by modifying
* our block pointer, so the parent must be released.
*/
ASSERT(arc_released(parent->db_buf));
pio = parent->db_data_pending->dr_zio;
} else {
/* Our parent is the dnode itself. */
ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
db->db_blkid != DMU_SPILL_BLKID) ||
(db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
pio = dn->dn_zio;
}
ASSERT(db->db_level == 0 || data == db->db_buf);
ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
ASSERT(pio);
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
db->db.db_object, db->db_level, db->db_blkid);
if (db->db_blkid == DMU_SPILL_BLKID)
wp_flag = WP_SPILL;
wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
/*
* We copy the blkptr now (rather than when we instantiate the dirty
* record), because its value can change between open context and
* syncing context. We do not need to hold dn_struct_rwlock to read
* db_blkptr because we are in syncing context.
*/
dr->dr_bp_copy = *db->db_blkptr;
if (db->db_level == 0 &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* The BP for this block has been provided by open context
* (by dmu_sync() or dmu_buf_write_embedded()).
*/
abd_t *contents = (data != NULL) ?
abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
contents, db->db.db_size, db->db.db_size, &zp,
dbuf_write_override_ready, NULL, NULL,
dbuf_write_override_done,
dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
mutex_enter(&db->db_mtx);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
mutex_exit(&db->db_mtx);
} else if (db->db_state == DB_NOFILL) {
ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
dr->dr_zio = zio_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
dbuf_write_nofill_ready, NULL, NULL,
dbuf_write_nofill_done, db,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
} else {
ASSERT(arc_released(data));
/*
* For indirect blocks, we want to set up the children
* ready callback so that we can properly handle an indirect
* block that only contains holes.
*/
arc_write_done_func_t *children_ready_cb = NULL;
if (db->db_level != 0)
children_ready_cb = dbuf_write_children_ready;
dr->dr_zio = arc_write(pio, os->os_spa, txg,
&dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
&zp, dbuf_write_ready,
children_ready_cb, dbuf_write_physdone,
dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED, &zb);
}
}
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW,
"Maximum size in bytes of the dbuf cache.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
"Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
"directly.");
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
"Percentage below dbuf_cache_max_bytes when the evict thread stops "
"evicting dbufs.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
"Maximum size in bytes of the dbuf metadata cache.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
"Set the size of the dbuf cache to a log2 fraction of arc size.");
ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
"Set the size of the dbuf metadata cache to a log2 fraction of arc "
"size.");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu.c b/sys/contrib/openzfs/module/zfs/dmu.c
index 8302d506146f..0c528f68ccf4 100644
--- a/sys/contrib/openzfs/module/zfs/dmu.c
+++ b/sys/contrib/openzfs/module/zfs/dmu.c
@@ -1,2358 +1,2359 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif
/*
* Enable/disable nopwrite feature.
*/
int zfs_nopwrite_enabled = 1;
/*
* Tunable to control percentage of dirtied L1 blocks from frees allowed into
* one TXG. After this threshold is crossed, additional dirty blocks from frees
* will wait until the next TXG.
* A value of zero will disable this throttle.
*/
unsigned long zfs_per_txg_dirty_frees_percent = 5;
/*
* Enable/disable forcing txg sync when dirty in dmu_offset_next.
*/
int zfs_dmu_offset_next_sync = 0;
/*
* Limit the amount we can prefetch with one call to this amount. This
* helps to limit the amount of memory that can be used by prefetching.
* Larger objects should be prefetched a bit at a time.
*/
int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};
const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
{ byteswap_uint8_array, "uint8" },
{ byteswap_uint16_array, "uint16" },
{ byteswap_uint32_array, "uint32" },
{ byteswap_uint64_array, "uint64" },
{ zap_byteswap, "zap" },
{ dnode_buf_byteswap, "dnode" },
{ dmu_objset_byteswap, "objset" },
{ zfs_znode_byteswap, "znode" },
{ zfs_oldacl_byteswap, "oldacl" },
{ zfs_acl_byteswap, "acl" }
};
static int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
void *tag, dmu_buf_t **dbp)
{
uint64_t blkid;
dmu_buf_impl_t *db;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (0);
}
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
uint64_t blkid;
dmu_buf_impl_t *db;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (err);
}
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_bonus_max(void)
{
return (DN_OLD_MAX_BONUSLEN);
}
int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else if (newsize < 0 || newsize > db_fake->db_size) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonuslen(dn, newsize, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (!DMU_OT_IS_VALID(type)) {
error = SET_ERROR(EINVAL);
} else if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonus_type(dn, type, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
dmu_object_type_t type;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
type = dn->dn_bonustype;
DB_DNODE_EXIT(db);
return (type);
}
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
dbuf_rm_spill(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_rm_spill(dn, tx);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (error);
}
/*
* Look up and hold the bonus buffer for the provided dnode. If the dnode
* has not yet been allocated a bonus dbuf, a new one will be allocated.
* Returns ENOENT, EIO, or 0.
*/
int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
uint32_t flags)
{
dmu_buf_impl_t *db;
int error;
uint32_t db_flags = DB_RF_MUST_SUCCEED;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
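/*
 * Take dn_struct_rwlock as reader and, only if the bonus dbuf does not
 * exist yet, re-take it as writer to create it. dn_bonus must be
 * re-checked under the writer lock because another thread may have
 * created it while the lock was dropped.
 */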
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus == NULL) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus == NULL)
dbuf_create_bonus(dn);
}
db = dn->dn_bonus;
/* as long as the bonus buf is held, the dnode will be held */
if (zfs_refcount_add(&db->db_holds, tag) == 1) {
VERIFY(dnode_add_ref(dn, db));
atomic_inc_32(&dn->dn_dbufs_count);
}
/*
* Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
* hold and incrementing the dbuf count to ensure that dnode_move() sees
* a dnode hold for every dbuf.
*/
rw_exit(&dn->dn_struct_rwlock);
error = dbuf_read(db, NULL, db_flags);
if (error) {
dnode_evict_bonus(dn);
dbuf_rele(db, tag);
*dbp = NULL;
return (error);
}
*dbp = &db->db;
return (0);
}
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
if (error)
return (error);
error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
dnode_rele(dn, FTAG);
return (error);
}
/*
* returns ENOENT, EIO, or 0.
*
* This interface will allocate a blank spill dbuf when a spill blk
* doesn't already exist on the dnode.
*
* if you only want to find an already existing spill db, then
* dmu_spill_hold_existing() should be used.
*/
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = NULL;
int err;
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
err = dbuf_read(db, NULL, flags);
if (err == 0)
*dbp = &db->db;
else {
dbuf_rele(db, tag);
*dbp = NULL;
}
return (err);
}
int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
err = SET_ERROR(EINVAL);
} else {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (!dn->dn_have_spill) {
err = SET_ERROR(ENOENT);
} else {
err = dmu_spill_hold_by_dnode(dn,
DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
}
rw_exit(&dn->dn_struct_rwlock);
}
DB_DNODE_EXIT(db);
return (err);
}
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
uint32_t db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Note: longer-term, we should modify all of the dmu_buf_*() interfaces
* to take a held dnode rather than <os, object> -- the lookup is wasteful,
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
dmu_buf_t **dbp;
zstream_t *zs = NULL;
uint64_t blkid, nblks, i;
uint32_t dbuf_flags;
int err;
zio_t *zio = NULL;
boolean_t missed = B_FALSE;
ASSERT(length <= DMU_MAX_ACCESS);
/*
* Note: We directly notify the prefetch code of this read, so that
* we can tell it about the multi-block read. dbuf_read() only knows
* about the one block it is accessing.
*/
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
DB_RF_NOPREFETCH;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
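/*
 * Example of the computation below (hypothetical sizes): with 128K
 * blocks (blkshift = 17), a 300K access at offset 100K rounds the end
 * up to 512K and aligns the start down to 0, so nblks = 512K >> 17 = 4.
 */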
nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
} else {
if (offset + length > dn->dn_datablksz) {
zfs_panic_recover("zfs: accessing past end of object "
"%llx/%llx (size=%u access=%llu+%llu)",
(longlong_t)dn->dn_objset->
os_dsl_dataset->ds_object,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(EIO));
}
nblks = 1;
}
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
if (read)
zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
blkid = dbuf_whichblock(dn, 0, offset);
if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
/*
* Prepare the zfetch before initiating the demand reads, so
* that if multiple threads block on the same indirect block, we
* base predictions on the original less racy request order.
*/
zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
read && DNODE_IS_CACHEABLE(dn), B_TRUE);
}
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
if (db == NULL) {
if (zs)
dmu_zfetch_run(zs, missed, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
if (read)
zio_nowait(zio);
return (SET_ERROR(EIO));
}
/*
* Initiate async demand data read.
* We check the db_state after calling dbuf_read() because
* (1) dbuf_read() may change the state to CACHED due to a
* hit in the ARC, and (2) on a cache miss, a child will
* have been added to "zio" but not yet completed, so the
* state will not yet be CACHED.
*/
if (read) {
(void) dbuf_read(db, zio, dbuf_flags);
if (db->db_state != DB_CACHED)
missed = B_TRUE;
}
dbp[i] = &db->db;
}
if (!read)
zfs_racct_write(length, nblks);
if (zs)
dmu_zfetch_run(zs, missed, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
if (read) {
/* wait for async read i/o */
err = zio_wait(zio);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
/* wait for other io to complete */
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
}
}
*numbufsp = nblks;
*dbpp = dbp;
return (0);
}
static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
uint64_t length, boolean_t read, void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
DB_DNODE_EXIT(db);
return (err);
}
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
int i;
dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
if (numbufs == 0)
return;
for (i = 0; i < numbufs; i++) {
if (dbp[i])
dbuf_rele(dbp[i], tag);
}
kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
/*
* Issue prefetch i/os for the given blocks. If level is greater than 0, the
* indirect blocks prefetched will be those that point to the blocks containing
* the data starting at offset, and continuing to offset + len.
*
* Note that if the indirect blocks above the blocks being prefetched are not
* in cache, they will be asynchronously read in.
*/
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, zio_priority_t pri)
{
dnode_t *dn;
uint64_t blkid;
int nblks, err;
if (len == 0) { /* they're interested in the bonus buffer */
dn = DMU_META_DNODE(os);
if (object == 0 || object >= DN_MAX_OBJECT)
return;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, level,
object * sizeof (dnode_phys_t));
dbuf_prefetch(dn, level, blkid, pri, 0);
rw_exit(&dn->dn_struct_rwlock);
return;
}
/*
* See comment before the definition of dmu_prefetch_max.
*/
len = MIN(len, dmu_prefetch_max);
/*
* XXX - Note, if the dnode for the requested object is not
* already cached, we will do a *synchronous* read in the
* dnode_hold() call. The same is true for any indirects.
*/
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return;
/*
* offset + len - 1 is the last byte we want to prefetch for, and offset
* is the first. Then dbuf_whichblock(dn, level, offset + len - 1) is the
* last block we want to prefetch, and dbuf_whichblock(dn, level,
* offset) is the first. Then the number we need to prefetch is the
* last - first + 1.
*/
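/*
 * Worked example (hypothetical sizes): with 128K level-0 blocks
 * (dn_datablkshift = 17), offset = 100K and len = 300K give a last
 * byte of 409599, so the blocks to prefetch run from 102400 >> 17 = 0
 * through 409599 >> 17 = 3, i.e. nblks = 4.
 */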
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (level > 0 || dn->dn_datablkshift != 0) {
nblks = dbuf_whichblock(dn, level, offset + len - 1) -
dbuf_whichblock(dn, level, offset) + 1;
} else {
nblks = (offset < dn->dn_datablksz);
}
if (nblks != 0) {
blkid = dbuf_whichblock(dn, level, offset);
for (int i = 0; i < nblks; i++)
dbuf_prefetch(dn, level, blkid + i, pri, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
/*
* Get the next "chunk" of file data to free. We traverse the file from
* the end so that the file gets shorter over time (if we crash in the
* middle, this will leave us in a better state). We find allocated file
* data by simply searching the allocated level 1 indirects.
*
* On input, *start should be the first offset that does not need to be
* freed (e.g. "offset + length"). On return, *start will be the first
* offset that should be freed and l1blks is set to the number of level 1
* indirect blocks found within the chunk.
*/
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
uint64_t blks;
uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
/* bytes of data covered by a level-1 indirect block */
uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
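/*
 * For example, assuming 128K data blocks and 128K indirect blocks
 * (dn_indblkshift = 17, SPA_BLKPTRSHIFT = 7), EPB() is 1024, so each
 * L1 indirect block covers 1024 * 128K = 128M of file data.
 */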
ASSERT3U(minimum, <=, *start);
/*
* Check if we can free the entire range assuming that all of the
* L1 blocks in this range have data. If we can, we use this
* worst case value as an estimate so we can avoid having to look
* at the object's actual data.
*/
uint64_t total_l1blks =
(roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
iblkrange;
if (total_l1blks <= maxblks) {
*l1blks = total_l1blks;
*start = minimum;
return (0);
}
ASSERT(ISP2(iblkrange));
for (blks = 0; *start > minimum && blks < maxblks; blks++) {
int err;
/*
* dnode_next_offset(BACKWARDS) will find an allocated L1
* indirect block at or before the input offset. We must
* decrement *start so that it is at the end of the region
* to search.
*/
(*start)--;
err = dnode_next_offset(dn,
DNODE_FIND_BACKWARDS, start, 2, 1, 0);
/* if there are no indirect blocks before start, we are done */
if (err == ESRCH) {
*start = minimum;
break;
} else if (err != 0) {
*l1blks = blks;
return (err);
}
/* set start to the beginning of this L1 indirect */
*start = P2ALIGN(*start, iblkrange);
}
if (*start < minimum)
*start = minimum;
*l1blks = blks;
return (0);
}
/*
* If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
* flag is set; otherwise return false.
* Used below in dmu_free_long_range_impl() to enable aborting when unmounting.
*/
-/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS)
return (zfs_get_vfs_flag_unmounted(os));
+#else
+ (void) os;
#endif
return (B_FALSE);
}
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
uint64_t length)
{
uint64_t object_size;
int err;
uint64_t dirty_frees_threshold;
dsl_pool_t *dp = dmu_objset_pool(os);
if (dn == NULL)
return (SET_ERROR(EINVAL));
object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
if (offset >= object_size)
return (0);
if (zfs_per_txg_dirty_frees_percent <= 100)
dirty_frees_threshold =
zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
else
dirty_frees_threshold = zfs_dirty_data_max / 20;
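/*
 * With the default zfs_per_txg_dirty_frees_percent of 5, the threshold
 * is 5% of zfs_dirty_data_max (e.g. ~205 MiB if zfs_dirty_data_max
 * were 4 GiB); values above 100 fall back to the same 5% (1/20).
 */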
if (length == DMU_OBJECT_END || offset + length > object_size)
length = object_size - offset;
while (length != 0) {
uint64_t chunk_end, chunk_begin, chunk_len;
uint64_t l1blks;
dmu_tx_t *tx;
if (dmu_objset_zfs_unmounting(dn->dn_objset))
return (SET_ERROR(EINTR));
chunk_end = chunk_begin = offset + length;
/* move chunk_begin backwards to the beginning of this chunk */
err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
if (err)
return (err);
ASSERT3U(chunk_begin, >=, offset);
ASSERT3U(chunk_begin, <=, chunk_end);
chunk_len = chunk_end - chunk_begin;
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
/*
* Mark this transaction as typically resulting in a net
* reduction in space used.
*/
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
uint64_t txg = dmu_tx_get_txg(tx);
mutex_enter(&dp->dp_lock);
uint64_t long_free_dirty =
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
mutex_exit(&dp->dp_lock);
/*
* To avoid filling up a TXG with just frees, wait for
* the next TXG to open before freeing more chunks if
* we have reached the threshold of frees.
*/
if (dirty_frees_threshold != 0 &&
long_free_dirty >= dirty_frees_threshold) {
DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
dmu_tx_commit(tx);
txg_wait_open(dp, 0, B_TRUE);
continue;
}
/*
* In order to prevent unnecessary write throttling, for each
* TXG, we track the cumulative size of L1 blocks being dirtied
* in dnode_free_range() below. We compare this number to a
* tunable threshold, past which we prevent new L1 dirty freeing
* blocks from being added into the open TXG. See
* dmu_free_long_range_impl() for details. The threshold
* prevents write throttle activation due to dirty freeing L1
* blocks taking up a large percentage of zfs_dirty_data_max.
*/
mutex_enter(&dp->dp_lock);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
l1blks << dn->dn_indblkshift;
mutex_exit(&dp->dp_lock);
DTRACE_PROBE3(free__long__range,
uint64_t, long_free_dirty, uint64_t, chunk_len,
uint64_t, txg);
dnode_free_range(dn, chunk_begin, chunk_len, tx);
dmu_tx_commit(tx);
length -= chunk_len;
}
return (0);
}
int
dmu_free_long_range(objset_t *os, uint64_t object,
uint64_t offset, uint64_t length)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_free_long_range_impl(os, dn, offset, length);
/*
* It is important to zero out the maxblkid when freeing the entire
* file, so that (a) subsequent calls to dmu_free_long_range_impl()
* will take the fast path, and (b) dnode_reallocate() can verify
* that the entire file has been freed.
*/
if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
dn->dn_maxblkid = 0;
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_free_long_object(objset_t *os, uint64_t object)
{
dmu_tx_t *tx;
int err;
err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
if (err != 0)
return (err);
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, object);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err == 0) {
if (err == 0)
err = dmu_object_free(os, object, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
return (err);
}
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
ASSERT(offset < UINT64_MAX);
ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
dnode_free_range(dn, offset, size, tx);
dnode_rele(dn, FTAG);
return (0);
}
static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dmu_buf_t **dbp;
int numbufs, err = 0;
/*
* Deal with odd block sizes, where there can't be data past the first
* block. If we ever do the tail block optimization, we will need to
* handle that here as well.
*/
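/*
 * For example (hypothetical values), with dn_datablksz = 2048, a
 * 4096-byte read at offset 1024 can return at most 1024 bytes of data;
 * the trailing 3072 bytes of the caller's buffer are zeroed below.
 */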
if (dn->dn_maxblkid == 0) {
uint64_t newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
bzero((char *)buf + newsz, size - newsz);
size = newsz;
}
while (size > 0) {
uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
int i;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
TRUE, FTAG, &numbufs, &dbp, flags);
if (err)
break;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
return (err);
}
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_read_impl(dn, offset, size, buf, flags);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags)
{
return (dmu_read_impl(dn, offset, size, buf, flags));
}
static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
int i;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
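/*
 * Only the first and last dbufs can be partially covered by the
 * write. A full-block write can skip the read (dmu_buf_will_fill),
 * while a partial write must read-modify-write (dmu_buf_will_dirty).
 */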
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
}
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
/*
* Note: Lustre is an external consumer of this interface.
*/
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs, i;
if (size == 0)
return;
VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
for (i = 0; i < numbufs; i++) {
dmu_buf_t *db = dbp[i];
dmu_buf_will_not_fill(db, tx);
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx)
{
dmu_buf_t *db;
ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
VERIFY0(dmu_buf_hold_noread(os, object, offset,
FTAG, &db));
dmu_buf_write_embedded(db,
data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
uncompressed_size, compressed_size, byteorder, tx);
dmu_buf_rele(db, FTAG);
}
void
dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
int numbufs, i;
dmu_buf_t **dbp;
VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
&numbufs, &dbp));
for (i = 0; i < numbufs; i++)
dmu_buf_redact(dbp[i], tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
{
dmu_buf_t **dbp;
int numbufs, i, err;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
TRUE, FTAG, &numbufs, &dbp, 0);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
UIO_READ, uio);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Read 'size' bytes into the uio buffer.
* From object zdb->db_object.
* Starting at zfs_uio_offset(uio).
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_read_uio_dnode(dn, uio, size);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Read 'size' bytes into the uio buffer.
* From the specified object
* Starting at offset zfs_uio_offset(uio).
*/
int
dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_read_uio_dnode(dn, uio, size);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
int err = 0;
int i;
err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = zfs_uio_offset(uio) - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
/*
* XXX zfs_uiomove could block forever (e.g. nfs-backed
* pages). There needs to be a uiolockdown() function
* to lock the pages in memory, so that zfs_uiomove won't
* block.
*/
err = zfs_uio_fault_move((char *)db->db_data + bufoff,
tocpy, UIO_WRITE, uio);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Write 'size' bytes from the uio buffer.
* To object zdb->db_object.
* Starting at offset zfs_uio_offset(uio).
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_write_uio_dnode(dn, uio, size, tx);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Write 'size' bytes from the uio buffer.
* To the specified object.
* Starting at offset zfs_uio_offset(uio).
*/
int
dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_write_uio_dnode(dn, uio, size, tx);
dnode_rele(dn, FTAG);
return (err);
}
#endif /* _KERNEL */
/*
* Allocate a loaned anonymous arc buffer.
*/
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}
/*
* Free a loaned arc buffer.
*/
void
dmu_return_arcbuf(arc_buf_t *buf)
{
arc_return_buf(buf, FTAG);
arc_buf_destroy(buf, FTAG);
}
/*
* A "lightweight" write is faster than a regular write (e.g.
* dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
* CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]_t. However, the
* data can not be read or overwritten until the transaction's txg has been
* synced. This makes it appropriate for workloads that are known to be
* (temporarily) write-only, like "zfs receive".
*
* A single block is written, starting at the specified offset in bytes. If
* the call is successful, it returns 0 and the provided abd has been
* consumed (the caller should not free it).
*/
int
dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
const zio_prop_t *zp, enum zio_flag flags, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr =
dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
if (dr == NULL)
return (SET_ERROR(EIO));
dr->dt.dll.dr_abd = abd;
dr->dt.dll.dr_props = *zp;
dr->dt.dll.dr_flags = flags;
return (0);
}
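/*
 * Illustrative sketch (editorial, not part of this change): a hypothetical
 * receive-style consumer could issue a lightweight write roughly as follows,
 * assuming it already holds the dnode, has an assigned dmu_tx_t covering the
 * block, and obtained a write policy from dmu_write_policy():
 *
 *	zio_prop_t zp;
 *	dmu_write_policy(dn->dn_objset, dn, 0, 0, &zp);
 *	error = dmu_lightweight_write_by_dnode(dn, offset, abd, &zp, 0, tx);
 *	if (error == 0)
 *		abd = NULL;
 *
 * On success the abd has been consumed and must not be freed, and the data
 * cannot be read back until the transaction's txg syncs, per the comment
 * above dmu_lightweight_write_by_dnode().
 */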
/*
* When possible, directly assign the passed loaned arc buffer to a dbuf.
* If this is not possible, copy the contents of the passed arc buf via
* dmu_write().
*/
int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
objset_t *os = dn->dn_objset;
uint64_t object = dn->dn_object;
uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
uint64_t blkid;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
db = dbuf_hold(dn, blkid, FTAG);
if (db == NULL)
return (SET_ERROR(EIO));
rw_exit(&dn->dn_struct_rwlock);
/*
* We can only assign if the offset is aligned and the arc buf is the
* same size as the dbuf.
*/
if (offset == db->db.db_offset && blksz == db->db.db_size) {
zfs_racct_write(blksz, 1);
dbuf_assign_arcbuf(db, buf, tx);
dbuf_rele(db, FTAG);
} else {
/* compressed bufs must always be assignable to their dbuf */
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
dmu_return_arcbuf(buf);
}
return (0);
}
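/*
 * Illustrative sketch (editorial, not part of this change): the loaned
 * arc buffer path above is typically driven as request/fill/assign. For a
 * hypothetical caller holding a bonus dbuf for the target object and an
 * assigned tx, a block-sized write might look like:
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, blksz);
 *	(void) memcpy(abuf->b_data, src, blksz);
 *	error = dmu_assign_arcbuf_by_dbuf(bonus_db, offset, abuf, tx);
 *	if (error != 0)
 *		dmu_return_arcbuf(abuf);
 *
 * On success the buffer is either assigned directly to the dbuf or copied
 * via dmu_write() and returned internally, so the caller must not free it.
 */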
int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
int err;
dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
DB_DNODE_ENTER(dbuf);
err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
DB_DNODE_EXIT(dbuf);
return (err);
}
typedef struct {
dbuf_dirty_record_t *dsa_dr;
dmu_sync_cb_t *dsa_done;
zgd_t *dsa_zgd;
dmu_tx_t *dsa_tx;
} dmu_sync_arg_t;
-/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
+ (void) buf;
dmu_sync_arg_t *dsa = varg;
dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
blkptr_t *bp = zio->io_bp;
if (zio->io_error == 0) {
if (BP_IS_HOLE(bp)) {
/*
* A block of zeros may compress to a hole, but the
* block size still needs to be known for replay.
*/
BP_SET_LSIZE(bp, db->db_size);
} else if (!BP_IS_EMBEDDED(bp)) {
ASSERT(BP_GET_LEVEL(bp) == 0);
BP_SET_FILL(bp, 1);
}
}
}
static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
dmu_sync_ready(zio, NULL, zio->io_private);
}
-/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
+ (void) buf;
dmu_sync_arg_t *dsa = varg;
dbuf_dirty_record_t *dr = dsa->dsa_dr;
dmu_buf_impl_t *db = dr->dr_dbuf;
zgd_t *zgd = dsa->dsa_zgd;
/*
* Record the vdev(s) backing this blkptr so they can be flushed after
* the writes for the lwb have completed.
*/
if (zio->io_error == 0) {
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
}
mutex_enter(&db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
if (zio->io_error == 0) {
dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
if (dr->dt.dl.dr_nopwrite) {
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
ASSERT(BP_EQUAL(bp, bp_orig));
VERIFY(BP_EQUAL(bp, db->db_blkptr));
ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
VERIFY(zio_checksum_table[chksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
}
dr->dt.dl.dr_overridden_by = *zio->io_bp;
dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
/*
* Old style holes are filled with all zeros, whereas
* new-style holes maintain their lsize, type, level,
* and birth time (see zio_write_compress). While we
* need to reset the BP_SET_LSIZE() call that happened
* in dmu_sync_ready for old style holes, we do *not*
* want to wipe out the information contained in new
* style holes. Thus, only zero out the block pointer if
* it's an old style hole.
*/
if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
dr->dt.dl.dr_overridden_by.blk_birth == 0)
BP_ZERO(&dr->dt.dl.dr_overridden_by);
} else {
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
}
cv_broadcast(&db->db_changed);
mutex_exit(&db->db_mtx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
kmem_free(dsa, sizeof (*dsa));
}
static void
dmu_sync_late_arrival_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
dmu_sync_arg_t *dsa = zio->io_private;
zgd_t *zgd = dsa->dsa_zgd;
if (zio->io_error == 0) {
/*
* Record the vdev(s) backing this blkptr so they can be
* flushed after the writes for the lwb have completed.
*/
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
if (!BP_IS_HOLE(bp)) {
blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
ASSERT(zio->io_bp->blk_birth == zio->io_txg);
ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
}
}
dmu_tx_commit(dsa->dsa_tx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
abd_free(zio->io_abd);
kmem_free(dsa, sizeof (*dsa));
}
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
zio_prop_t *zp, zbookmark_phys_t *zb)
{
dmu_sync_arg_t *dsa;
dmu_tx_t *tx;
tx = dmu_tx_create(os);
dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
dmu_tx_abort(tx);
/* Make zl_get_data do txg_wait_synced() */
return (SET_ERROR(EIO));
}
/*
* In order to prevent the zgd's lwb from being free'd prior to
* dmu_sync_late_arrival_done() being called, we have to ensure
* the lwb's "max txg" takes this tx's txg into account.
*/
zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = NULL;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = tx;
/*
* Since we are currently syncing this txg, it's nontrivial to
* determine what BP to nopwrite against, so we disable nopwrite.
*
* When syncing, the db_blkptr is initially the BP of the previous
* txg. We can not nopwrite against it because it will be changed
* (this is similar to the non-late-arrival case where the dbuf is
* dirty in a future txg).
*
* Then dbuf_write_ready() sets db_blkptr to the location we will write.
* We can not nopwrite against it because although the BP will not
* (typically) be changed, the data has not yet been persisted to this
* location.
*
* Finally, when dbuf_write_done() is called, it is theoretically
* possible to always nopwrite, because the data that was written in
* this txg is the same data that we are trying to write. However we
* would need to check that this dbuf is not dirty in any future
* txg's (as we do in the normal dmu_sync() path). For simplicity, we
* don't nopwrite in this case.
*/
zp->zp_nopwrite = B_FALSE;
zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
return (0);
}
/*
* Intent log support: sync the block associated with db to disk.
* N.B. and XXX: the caller is responsible for making sure that the
* data isn't changing while dmu_sync() is writing it.
*
* Return values:
*
* EEXIST: this txg has already been synced, so there's nothing to do.
* The caller should not log the write.
*
* ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
* The caller should not log the write.
*
* EALREADY: this block is already in the process of being synced.
* The caller should track its progress (somehow).
*
* EIO: could not do the I/O.
* The caller should do a txg_wait_synced().
*
* 0: the I/O has been initiated.
* The caller should log this blkptr in the done callback.
* It is possible that the I/O will fail, in which case
* the error will be reported to the done callback and
* propagated to pio from zio_done().
*/
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
objset_t *os = db->db_objset;
dsl_dataset_t *ds = os->os_dsl_dataset;
dbuf_dirty_record_t *dr, *dr_next;
dmu_sync_arg_t *dsa;
zbookmark_phys_t zb;
zio_prop_t zp;
dnode_t *dn;
ASSERT(pio != NULL);
ASSERT(txg != 0);
SET_BOOKMARK(&zb, ds->ds_object,
db->db.db_object, db->db_level, db->db_blkid);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
DB_DNODE_EXIT(db);
/*
* If we're frozen (running ziltest), we always need to generate a bp.
*/
if (txg > spa_freeze_txg(os->os_spa))
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
/*
* Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
* and us. If we determine that this txg is not yet syncing,
* but it begins to sync a moment later, that's OK because the
* sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
*/
mutex_enter(&db->db_mtx);
if (txg <= spa_last_synced_txg(os->os_spa)) {
/*
* This txg has already synced. There's nothing to do.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EEXIST));
}
if (txg <= spa_syncing_txg(os->os_spa)) {
/*
* This txg is currently syncing, so we can't mess with
* the dirty record anymore; just write a new log block.
*/
mutex_exit(&db->db_mtx);
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
}
dr = dbuf_find_dirty_eq(db, txg);
if (dr == NULL) {
/*
* There's no dr for this dbuf, so it must have been freed.
* There's no need to log writes to freed blocks, so we're done.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
dr_next = list_next(&db->db_dirty_records, dr);
ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
if (db->db_blkptr != NULL) {
/*
* We need to fill in zgd_bp with the current blkptr so that
* the nopwrite code can check if we're writing the same
* data that's already on disk. We can only nopwrite if we
* are sure that after making the copy, db_blkptr will not
* change until our i/o completes. We ensure this by
* holding the db_mtx, and only allowing nopwrite if the
* block is not already dirty (see below). This is verified
* by dmu_sync_done(), which VERIFYs that the db_blkptr has
* not changed.
*/
*zgd->zgd_bp = *db->db_blkptr;
}
/*
* Assume the on-disk data is X, the current syncing data (in
* txg - 1) is Y, and the current in-memory data is Z (currently
* in dmu_sync).
*
* We usually want to perform a nopwrite if X and Z are the
* same. However, if Y is different (i.e. the BP is going to
* change before this write takes effect), then a nopwrite will
* be incorrect - we would override with X, which could have
* been freed when Y was written.
*
* (Note that this is not a concern when we are nop-writing from
* syncing context, because X and Y must be identical, because
* all previous txgs have been synced.)
*
* Therefore, we disable nopwrite if the current BP could change
* before this TXG. There are two ways it could change: by
* being dirty (dr_next is non-NULL), or by being freed
* (dnode_block_freed()). This behavior is verified by
* zio_done(), which VERIFYs that the override BP is identical
* to the on-disk BP.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
zp.zp_nopwrite = B_FALSE;
DB_DNODE_EXIT(db);
ASSERT(dr->dr_txg == txg);
if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* We have already issued a sync write for this buffer,
* or this buffer has already been synced. It could not
* have been dirtied since, or we would have cleared the state.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EALREADY));
}
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
mutex_exit(&db->db_mtx);
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = dr;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = NULL;
zio_nowait(arc_write(pio, os->os_spa, txg,
zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
&zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
return (0);
}
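/*
 * Illustrative sketch (editorial, not part of this change): a hypothetical
 * intent-log caller consuming the return values documented above might
 * dispatch along these lines:
 *
 *	error = dmu_sync(pio, txg, done_cb, zgd);
 *	if (error == EEXIST || error == ENOENT)
 *		skip logging this write;
 *	else if (error == EALREADY)
 *		track the already in-flight sync write;
 *	else if (error == EIO)
 *		txg_wait_synced(dmu_objset_pool(os), txg);
 *	else
 *		wait for done_cb, which receives the blkptr to log;
 *
 * Only the error contract is taken from the comment preceding dmu_sync();
 * the surrounding control flow and names are placeholders.
 */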
int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_nlevels(dn, nlevels, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_blksz(dn, size, ibs, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (0);
}
void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's checksum function. This
* check ensures that the receiving system can understand the
* checksum function transmitted.
*/
ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
dn->dn_checksum = checksum;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's compression function. This
* check ensures that the receiving system can understand the
* compression function transmitted.
*/
ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
dn->dn_compress = compress;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
/*
* When the "redundant_metadata" property is set to "most", only indirect
* blocks of this level and higher will have an additional ditto block.
*/
int zfs_redundant_metadata_most_ditto_level = 2;
void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
(wp & WP_SPILL));
enum zio_checksum checksum = os->os_checksum;
enum zio_compress compress = os->os_compress;
uint8_t complevel = os->os_complevel;
enum zio_checksum dedup_checksum = os->os_dedup_checksum;
boolean_t dedup = B_FALSE;
boolean_t nopwrite = B_FALSE;
boolean_t dedup_verify = os->os_dedup_verify;
boolean_t encrypt = B_FALSE;
int copies = os->os_copies;
/*
* We maintain different write policies for each of the following
* types of data:
* 1. metadata
* 2. preallocated blocks (i.e. level-0 blocks of a dump device)
* 3. all other level 0 blocks
*/
if (ismd) {
/*
* XXX -- we should design a compression algorithm
* that specializes in arrays of bps.
*/
compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
/*
* Metadata always gets checksummed. If the data
* checksum is multi-bit correctable, and it's not a
* ZBT-style checksum, then it's suitable for metadata
* as well. Otherwise, the metadata checksum defaults
* to fletcher4.
*/
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_METADATA) ||
(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED))
checksum = ZIO_CHECKSUM_FLETCHER_4;
if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
(os->os_redundant_metadata ==
ZFS_REDUNDANT_METADATA_MOST &&
(level >= zfs_redundant_metadata_most_ditto_level ||
DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
copies++;
} else if (wp & WP_NOFILL) {
ASSERT(level == 0);
/*
* If we're writing preallocated blocks, we aren't actually
* writing them so don't set any policy properties. These
* blocks are currently only used by an external subsystem
* outside of zfs (i.e. dump) and not written by the zio
* pipeline.
*/
compress = ZIO_COMPRESS_OFF;
checksum = ZIO_CHECKSUM_OFF;
} else {
compress = zio_compress_select(os->os_spa, dn->dn_compress,
compress);
complevel = zio_complevel_select(os->os_spa, compress,
complevel, complevel);
checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
zio_checksum_select(dn->dn_checksum, checksum) :
dedup_checksum;
/*
* Determine dedup setting. If we are in dmu_sync(),
* we won't actually dedup now because that's all
* done in syncing context; but we do want to use the
* dedup checksum. If the checksum is not strong
* enough to ensure unique signatures, force
* dedup_verify.
*/
if (dedup_checksum != ZIO_CHECKSUM_OFF) {
dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP))
dedup_verify = B_TRUE;
}
/*
* Enable nopwrite if we have a secure enough checksum
* algorithm (see comment in zio_nop_write) and
* compression is enabled. We don't enable nopwrite if
* dedup is enabled as the two features are mutually
* exclusive.
*/
nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) &&
compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
}
/*
* All objects in an encrypted objset are protected from modification
* via a MAC. Encrypted objects store their IV and salt in the last DVA
* in the bp, so we cannot use all copies. Encrypted objects are also
* not subject to nopwrite since writing the same data will still
* result in a new ciphertext. Only encrypted blocks can be dedup'd
* to avoid ambiguity in the dedup code since the DDT does not store
* object types.
*/
if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
encrypt = B_TRUE;
if (DMU_OT_IS_ENCRYPTED(type)) {
copies = MIN(copies, SPA_DVAS_PER_BP - 1);
nopwrite = B_FALSE;
} else {
dedup = B_FALSE;
}
if (level <= 0 &&
(type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
compress = ZIO_COMPRESS_EMPTY;
}
}
zp->zp_compress = compress;
zp->zp_complevel = complevel;
zp->zp_checksum = checksum;
zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
zp->zp_level = level;
zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
zp->zp_dedup = dedup;
zp->zp_dedup_verify = dedup && dedup_verify;
zp->zp_nopwrite = nopwrite;
zp->zp_encrypt = encrypt;
zp->zp_byteorder = ZFS_HOST_BYTEORDER;
bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
os->os_zpl_special_smallblock : 0;
ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
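/*
 * Worked example (editorial, not part of this change): for an ordinary
 * level-0 file block on an unencrypted dataset with dedup=off,
 * checksum=sha256 and compression=lz4, the final "else" branch above is
 * taken, so zp_checksum=sha256, zp_compress=lz4, zp_dedup=B_FALSE, and
 * zp_nopwrite is true only while zfs_nopwrite_enabled is set, because
 * SHA-256 carries ZCHECKSUM_FLAG_NOPWRITE and compression is not off.
 * zp_copies remains the dataset's copies value, capped by
 * spa_max_replication().
 */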
/*
* This function is only called from zfs_holey_common() for zpl_llseek()
* in order to determine the location of holes. In order to accurately
* report holes all dirty data must be synced to disk. This causes extremely
* poor performance when seeking for holes in a dirty file. As a compromise,
* only provide hole data when the dnode is clean. When a dnode is dirty
* report the dnode as having no holes which is always a safe thing to do.
*/
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
dnode_t *dn;
int err;
restart:
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dnode_is_dirty(dn)) {
/*
* If the zfs_dmu_offset_next_sync module option is enabled
* then strict hole reporting has been requested. Dirty
* dnodes must be synced to disk to accurately report all
* holes. When disabled (the default) dirty dnodes are
* reported to not have any holes which is always safe.
*
* When called by zfs_holey_common() the zp->z_rangelock
* is held to prevent zfs_write() and mmap writeback from
* re-dirtying the dnode after txg_wait_synced().
*/
if (zfs_dmu_offset_next_sync) {
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
txg_wait_synced(dmu_objset_pool(os), 0);
goto restart;
}
err = SET_ERROR(EBUSY);
} else {
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
(hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (err);
}
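/*
 * Editorial note (not part of this change): per the comment above, with
 * zfs_dmu_offset_next_sync=0 a dirty dnode yields EBUSY here, which a
 * hypothetical caller such as zfs_holey_common() would be expected to treat
 * as "no hole information available", i.e. SEEK_HOLE resolves to end of
 * file and SEEK_DATA to the requested offset.
 */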
void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
dnode_phys_t *dnp = dn->dn_phys;
doi->doi_data_block_size = dn->dn_datablksz;
doi->doi_metadata_block_size = dn->dn_indblkshift ?
1ULL << dn->dn_indblkshift : 0;
doi->doi_type = dn->dn_type;
doi->doi_bonus_type = dn->dn_bonustype;
doi->doi_bonus_size = dn->dn_bonuslen;
doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
doi->doi_indirection = dn->dn_nlevels;
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
doi->doi_nblkptr = dn->dn_nblkptr;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
for (int i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}
void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
mutex_enter(&dn->dn_mtx);
__dmu_object_info_from_dnode(dn, doi);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
}
/*
* Get information on a DMU object.
* If doi is NULL, just indicates whether the object exists.
*/
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
if (doi != NULL)
dmu_object_info_from_dnode(dn, doi);
dnode_rele(dn, FTAG);
return (0);
}
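/*
 * Illustrative sketch (editorial, not part of this change): a hypothetical
 * caller can pass a NULL doi purely as an existence check, or supply one to
 * read the object's geometry:
 *
 *	dmu_object_info_t doi;
 *	if (dmu_object_info(os, object, &doi) == 0) {
 *		blksz = doi.doi_data_block_size;
 *		maxoff = doi.doi_max_offset;
 *	}
 *
 * The locals blksz and maxoff are placeholders for whatever the caller
 * actually needs from the dmu_object_info_t.
 */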
/*
* As above, but faster; can be used when you have a held dbuf in hand.
*/
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
DB_DNODE_ENTER(db);
dmu_object_info_from_dnode(DB_DNODE(db), doi);
DB_DNODE_EXIT(db);
}
/*
* Faster still when you only care about the size.
*/
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
u_longlong_t *nblk512)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*blksize = dn->dn_datablksz;
/* add in number of slots used for the dnode itself */
*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
DB_DNODE_EXIT(db);
}
void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*dnsize = dn->dn_num_slots << DNODE_SHIFT;
DB_DNODE_EXIT(db);
}
void
byteswap_uint64_array(void *vbuf, size_t size)
{
uint64_t *buf = vbuf;
size_t count = size >> 3;
int i;
ASSERT((size & 7) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_64(buf[i]);
}
void
byteswap_uint32_array(void *vbuf, size_t size)
{
uint32_t *buf = vbuf;
size_t count = size >> 2;
int i;
ASSERT((size & 3) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_32(buf[i]);
}
void
byteswap_uint16_array(void *vbuf, size_t size)
{
uint16_t *buf = vbuf;
size_t count = size >> 1;
int i;
ASSERT((size & 1) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_16(buf[i]);
}
-/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
+ (void) vbuf, (void) size;
}
void
dmu_init(void)
{
abd_init();
zfs_dbgmsg_init();
sa_cache_init();
dmu_objset_init();
dnode_init();
zfetch_init();
dmu_tx_init();
l2arc_init();
arc_init();
dbuf_init();
}
void
dmu_fini(void)
{
arc_fini(); /* arc depends on l2arc, so arc must go first */
l2arc_fini();
dmu_tx_fini();
zfetch_fini();
dbuf_fini();
dnode_fini();
dmu_objset_fini();
sa_cache_fini();
zfs_dbgmsg_fini();
abd_fini();
}
EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
"Enable NOP writes");
ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, ULONG, ZMOD_RW,
"Percentage of dirtied blocks from frees in one TXG");
ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
"Enable forcing txg sync to find holes");
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, INT, ZMOD_RW,
"Limit one prefetch call to this size");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_diff.c b/sys/contrib/openzfs/module/zfs/dmu_diff.c
index a573a2e1bd41..1382da267d8a 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_diff.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_diff.c
@@ -1,240 +1,240 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_file.h>
typedef struct dmu_diffarg {
zfs_file_t *da_fp; /* file to which we are reporting */
offset_t *da_offp;
int da_err; /* error that stopped diff search */
dmu_diff_record_t da_ddr;
} dmu_diffarg_t;
static int
write_record(dmu_diffarg_t *da)
{
zfs_file_t *fp;
ssize_t resid;
if (da->da_ddr.ddr_type == DDR_NONE) {
da->da_err = 0;
return (0);
}
fp = da->da_fp;
da->da_err = zfs_file_write(fp, (caddr_t)&da->da_ddr,
sizeof (da->da_ddr), &resid);
*da->da_offp += sizeof (da->da_ddr);
return (da->da_err);
}
static int
report_free_dnode_range(dmu_diffarg_t *da, uint64_t first, uint64_t last)
{
ASSERT(first <= last);
if (da->da_ddr.ddr_type != DDR_FREE ||
first != da->da_ddr.ddr_last + 1) {
if (write_record(da) != 0)
return (da->da_err);
da->da_ddr.ddr_type = DDR_FREE;
da->da_ddr.ddr_first = first;
da->da_ddr.ddr_last = last;
return (0);
}
da->da_ddr.ddr_last = last;
return (0);
}
static int
report_dnode(dmu_diffarg_t *da, uint64_t object, dnode_phys_t *dnp)
{
ASSERT(dnp != NULL);
if (dnp->dn_type == DMU_OT_NONE)
return (report_free_dnode_range(da, object, object));
if (da->da_ddr.ddr_type != DDR_INUSE ||
object != da->da_ddr.ddr_last + 1) {
if (write_record(da) != 0)
return (da->da_err);
da->da_ddr.ddr_type = DDR_INUSE;
da->da_ddr.ddr_first = da->da_ddr.ddr_last = object;
return (0);
}
da->da_ddr.ddr_last = object;
return (0);
}
#define DBP_SPAN(dnp, level) \
(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
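/*
 * Worked example (editorial, not part of this change): with
 * dn_datablkszsec = 32 (a 16K data block, since SPA_MINBLOCKSHIFT is 9)
 * and dn_indblkshift = 17 (128K indirect blocks holding 1024 128-byte
 * block pointers), DBP_SPAN(dnp, 0) = 16K and
 * DBP_SPAN(dnp, 1) = 32 << (9 + 1 * (17 - 7)) = 16M of object space per
 * level-1 block pointer.
 */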
-/* ARGSUSED */
static int
diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog;
dmu_diffarg_t *da = arg;
int err = 0;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (SET_ERROR(EINTR));
if (zb->zb_level == ZB_DNODE_LEVEL ||
zb->zb_object != DMU_META_DNODE_OBJECT)
return (0);
if (BP_IS_HOLE(bp)) {
uint64_t span = DBP_SPAN(dnp, zb->zb_level);
uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
err = report_free_dnode_range(da, dnobj,
dnobj + (span >> DNODE_SHIFT) - 1);
if (err)
return (err);
} else if (zb->zb_level == 0) {
dnode_phys_t *blk;
arc_buf_t *abuf;
arc_flags_t aflags = ARC_FLAG_WAIT;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
int zio_flags = ZIO_FLAG_CANFAIL;
int i;
if (BP_IS_PROTECTED(bp))
zio_flags |= ZIO_FLAG_RAW;
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, zio_flags, &aflags, zb) != 0)
return (SET_ERROR(EIO));
blk = abuf->b_data;
for (i = 0; i < epb; i += blk[i].dn_extra_slots + 1) {
uint64_t dnobj = (zb->zb_blkid <<
(DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
err = report_dnode(da, dnobj, blk+i);
if (err)
break;
}
arc_buf_destroy(abuf, &abuf);
if (err)
return (err);
/* Don't care about the data blocks */
return (TRAVERSE_VISIT_NO_CHILDREN);
}
return (0);
}
int
dmu_diff(const char *tosnap_name, const char *fromsnap_name,
zfs_file_t *fp, offset_t *offp)
{
dmu_diffarg_t da;
dsl_dataset_t *fromsnap;
dsl_dataset_t *tosnap;
dsl_pool_t *dp;
int error;
uint64_t fromtxg;
if (strchr(tosnap_name, '@') == NULL ||
strchr(fromsnap_name, '@') == NULL)
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(tosnap_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, tosnap_name, FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_hold(dp, fromsnap_name, FTAG, &fromsnap);
if (error != 0) {
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
if (!dsl_dataset_is_before(tosnap, fromsnap, 0)) {
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (SET_ERROR(EXDEV));
}
fromtxg = dsl_dataset_phys(fromsnap)->ds_creation_txg;
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_long_hold(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
da.da_fp = fp;
da.da_offp = offp;
da.da_ddr.ddr_type = DDR_NONE;
da.da_ddr.ddr_first = da.da_ddr.ddr_last = 0;
da.da_err = 0;
/*
* Since zfs diff only looks at dnodes which are stored in plaintext
* (other than bonus buffers), we don't technically need to decrypt
* the dataset to perform this operation. However, the command line
* utility will still fail if the keys are not loaded because the
* dataset isn't mounted and because it will fail when it attempts to
* call the ZFS_IOC_OBJ_TO_STATS ioctl.
*/
error = traverse_dataset(tosnap, fromtxg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_NO_DECRYPT,
diff_cb, &da);
if (error != 0) {
da.da_err = error;
} else {
/* we set the da.da_err we return as side-effect */
(void) write_record(&da);
}
dsl_dataset_long_rele(tosnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
return (da.da_err);
}
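/*
 * Illustrative sketch (editorial, not part of this change): the stream
 * written above is a flat sequence of dmu_diff_record_t entries; a
 * hypothetical consumer reading from the file would loop roughly as:
 *
 *	dmu_diff_record_t ddr;
 *	while (a full sizeof (ddr) bytes can be read) {
 *		if (ddr.ddr_type == DDR_INUSE)
 *			report objects ddr.ddr_first..ddr.ddr_last as in use;
 *		else if (ddr.ddr_type == DDR_FREE)
 *			report objects ddr.ddr_first..ddr.ddr_last as freed;
 *	}
 *
 * The field names follow the dmu_diffarg_t usage above; the reader loop
 * itself is a placeholder.
 */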
diff --git a/sys/contrib/openzfs/module/zfs/dmu_objset.c b/sys/contrib/openzfs/module/zfs/dmu_objset.c
index af107fb8ad63..b9380890230c 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_objset.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_objset.c
@@ -1,3050 +1,3048 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_recv.h>
#include <sys/zfs_project.h>
#include "zfs_namecheck.h"
/*
* Needed to close a window in dnode_move() that allows the objset to be freed
* before it can be safely accessed.
*/
krwlock_t os_lock;
/*
* Tunable to overwrite the maximum number of threads for the parallelization
* of dmu_objset_find_dp, needed to speed up the import of pools with many
* datasets.
* Default is 4 times the number of leaf vdevs.
*/
int dmu_find_threads = 0;
/*
* Backfill lower metadnode objects after this many have been freed.
* Backfilling negatively impacts object creation rates, so only do it
* if there are enough holes to fill.
*/
int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;
static char *upgrade_tag = "upgrade_tag";
static void dmu_objset_find_dp_cb(void *arg);
static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb);
static void dmu_objset_upgrade_stop(objset_t *os);
void
dmu_objset_init(void)
{
rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
void
dmu_objset_fini(void)
{
rw_destroy(&os_lock);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
return (os->os_spa);
}
zilog_t *
dmu_objset_zil(objset_t *os)
{
return (os->os_zil);
}
dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
dsl_dataset_t *ds;
if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
return (ds->ds_dir->dd_pool);
else
return (spa_get_dsl(os->os_spa));
}
dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
return (os->os_dsl_dataset);
}
dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
return (os->os_phys->os_type);
}
void
dmu_objset_name(objset_t *os, char *buf)
{
dsl_dataset_name(os->os_dsl_dataset, buf);
}
uint64_t
dmu_objset_id(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
return (ds ? ds->ds_object : 0);
}
uint64_t
dmu_objset_dnodesize(objset_t *os)
{
return (os->os_dnodesize);
}
zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
return (os->os_sync);
}
zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}
static void
compression_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval != ZIO_COMPRESS_INHERIT);
os->os_compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ALGO(newval), ZIO_COMPRESS_ON);
os->os_complevel = zio_complevel_select(os->os_spa, os->os_compress,
ZIO_COMPRESS_LEVEL(newval), ZIO_COMPLEVEL_DEFAULT);
}
static void
copies_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval > 0);
ASSERT(newval <= spa_max_replication(os->os_spa));
os->os_copies = newval;
}
static void
dedup_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
spa_t *spa = os->os_spa;
enum zio_checksum checksum;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_primary_cache = newval;
}
static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_secondary_cache = newval;
}
static void
sync_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
newval == ZFS_SYNC_DISABLED);
os->os_sync = newval;
if (os->os_zil)
zil_set_sync(os->os_zil, newval);
}
static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
newval == ZFS_REDUNDANT_METADATA_MOST);
os->os_redundant_metadata = newval;
}
static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
switch (newval) {
case ZFS_DNSIZE_LEGACY:
os->os_dnodesize = DNODE_MIN_SIZE;
break;
case ZFS_DNSIZE_AUTO:
/*
* Choose a dnode size that will work well for most
* workloads if the user specified "auto". Future code
* improvements could dynamically select a dnode size
* based on observed workload patterns.
*/
os->os_dnodesize = DNODE_MIN_SIZE * 2;
break;
case ZFS_DNSIZE_1K:
case ZFS_DNSIZE_2K:
case ZFS_DNSIZE_4K:
case ZFS_DNSIZE_8K:
case ZFS_DNSIZE_16K:
os->os_dnodesize = newval;
break;
}
}
static void
smallblk_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval <= SPA_MAXBLOCKSIZE);
ASSERT(ISP2(newval));
os->os_zpl_special_smallblock = newval;
}
static void
logbias_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
newval == ZFS_LOGBIAS_THROUGHPUT);
os->os_logbias = newval;
if (os->os_zil)
zil_set_logbias(os->os_zil, newval);
}
static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
os->os_recordsize = newval;
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
objset_phys_t *osp = buf;
ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
size == sizeof (objset_phys_t));
dnode_byteswap(&osp->os_meta_dnode);
byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
osp->os_type = BSWAP_64(osp->os_type);
osp->os_flags = BSWAP_64(osp->os_flags);
if (size >= OBJSET_PHYS_SIZE_V2) {
dnode_byteswap(&osp->os_userused_dnode);
dnode_byteswap(&osp->os_groupused_dnode);
if (size >= sizeof (objset_phys_t))
dnode_byteswap(&osp->os_projectused_dnode);
}
}
/*
* The hash is a CRC-based hash of the objset_t pointer and the object number.
*/
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
uintptr_t osv = (uintptr_t)os;
uint64_t crc = -1ULL;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
/*
* The low 6 bits of the pointer don't have much entropy, because
* the objset_t is larger than 2^6 bytes long.
*/
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];
crc ^= (osv>>14) ^ (obj>>24);
return (crc);
}
static unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
dnode_t *dn = obj;
/*
* The low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed. In this context full 64bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)dnode_hash(dn->dn_objset, dn->dn_object) %
multilist_get_num_sublists(ml));
}
/*
* Instantiates the objset_t in-memory structure corresponding to the
* objset_phys_t that's pointed to by the specified blkptr_t.
*/
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
objset_t **osp)
{
objset_t *os;
int i, err;
ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
ASSERT(!BP_IS_REDACTED(bp));
/*
* We need the pool config lock to get properties.
*/
ASSERT(ds == NULL || dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* The $ORIGIN dataset (if it exists) doesn't have an associated
* objset, so there's no reason to open it. The $ORIGIN dataset
* will not exist on pools older than SPA_VERSION_ORIGIN.
*/
if (ds != NULL && spa_get_dsl(spa) != NULL &&
spa_get_dsl(spa)->dp_origin_snap != NULL) {
ASSERT3P(ds->ds_dir, !=,
spa_get_dsl(spa)->dp_origin_snap->ds_dir);
}
os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
os->os_dsl_dataset = ds;
os->os_spa = spa;
os->os_rootbp = bp;
if (!BP_IS_HOLE(os->os_rootbp)) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
int size;
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (DMU_OS_IS_L2CACHEABLE(os))
aflags |= ARC_FLAG_L2CACHE;
if (ds != NULL && ds->ds_dir->dd_crypto_obj != 0) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT(BP_IS_AUTHENTICATED(bp));
zio_flags |= ZIO_FLAG_RAW;
}
dprintf_bp(os->os_rootbp, "reading %s", "");
err = arc_read(NULL, spa, os->os_rootbp,
arc_getbuf_func, &os->os_phys_buf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (err != 0) {
kmem_free(os, sizeof (objset_t));
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
if (spa_version(spa) < SPA_VERSION_USERSPACE)
size = OBJSET_PHYS_SIZE_V1;
else if (!spa_feature_is_enabled(spa,
SPA_FEATURE_PROJECT_QUOTA))
size = OBJSET_PHYS_SIZE_V2;
else
size = sizeof (objset_phys_t);
/* Increase the blocksize if we are permitted. */
if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
bzero(buf->b_data, size);
bcopy(os->os_phys_buf->b_data, buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
os->os_phys_buf = buf;
}
os->os_phys = os->os_phys_buf->b_data;
os->os_flags = os->os_phys->os_flags;
} else {
int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1;
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
bzero(os->os_phys, size);
}
/*
* These properties will be filled in by the logic in zfs_get_zplprop()
* when they are queried for the first time.
*/
os->os_version = OBJSET_PROP_UNINITIALIZED;
os->os_normalization = OBJSET_PROP_UNINITIALIZED;
os->os_utf8only = OBJSET_PROP_UNINITIALIZED;
os->os_casesensitivity = OBJSET_PROP_UNINITIALIZED;
/*
* Note: the changed_cb will be called once before the register
* func returns, thus changing the checksum/compression from the
* default (fletcher2/off). Snapshots don't need to know about
* checksum/compression/copies.
*/
if (ds != NULL) {
os->os_encrypted = (ds->ds_dir->dd_crypto_obj != 0);
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
primary_cache_changed_cb, os);
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
secondary_cache_changed_cb, os);
}
if (!ds->ds_is_snapshot) {
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_CHECKSUM),
checksum_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
compression_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COPIES),
copies_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEDUP),
dedup_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_LOGBIAS),
logbias_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SYNC),
sync_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_REDUNDANT_METADATA),
redundant_metadata_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
recordsize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DNODESIZE),
dnodesize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_SPECIAL_SMALL_BLOCKS),
smallblk_changed_cb, os);
}
}
if (err != 0) {
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
kmem_free(os, sizeof (objset_t));
return (err);
}
} else {
/* It's the meta-objset. */
os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
os->os_compress = ZIO_COMPRESS_ON;
os->os_complevel = ZIO_COMPLEVEL_DEFAULT;
os->os_encrypted = B_FALSE;
os->os_copies = spa_max_replication(spa);
os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
os->os_dedup_verify = B_FALSE;
os->os_logbias = ZFS_LOGBIAS_LATENCY;
os->os_sync = ZFS_SYNC_STANDARD;
os->os_primary_cache = ZFS_CACHE_ALL;
os->os_secondary_cache = ZFS_CACHE_ALL;
os->os_dnodesize = DNODE_MIN_SIZE;
}
if (ds == NULL || !ds->ds_is_snapshot)
os->os_zil_header = os->os_phys->os_zil_header;
os->os_zil = zil_alloc(os, &os->os_zil_header);
for (i = 0; i < TXG_SIZE; i++) {
multilist_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[i]),
dnode_multilist_index_func);
}
list_create(&os->os_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_link));
list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
list_link_init(&os->os_evicting_node);
mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
os->os_obj_next_percpu_len = boot_ncpus;
os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
dnode_special_open(os, &os->os_phys->os_meta_dnode,
DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) {
dnode_special_open(os, &os->os_phys->os_userused_dnode,
DMU_USERUSED_OBJECT, &os->os_userused_dnode);
dnode_special_open(os, &os->os_phys->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf))
dnode_special_open(os,
&os->os_phys->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode);
}
mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL);
*osp = os;
return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
int err = 0;
/*
* We need the pool_config lock to manipulate the dsl_dataset_t.
* Even if the dataset is long-held, we need the pool_config lock
* to open the objset, as it needs to get properties.
*/
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_opening_lock);
if (ds->ds_objset == NULL) {
objset_t *os;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
ds, dsl_dataset_get_blkptr(ds), &os);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (err == 0) {
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_objset == NULL);
ds->ds_objset = os;
mutex_exit(&ds->ds_lock);
}
}
*osp = ds->ds_objset;
mutex_exit(&ds->ds_opening_lock);
return (err);
}
/*
* Holds the pool while the objset is held. Therefore only one objset
* can be held at a time.
*/
int
dmu_objset_hold_flags(const char *name, boolean_t decrypt, void *tag,
objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_pool_hold(name, tag, &dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
}
return (err);
}
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
- int err;
+ (void) tag;
- err = dmu_objset_from_ds(ds, osp);
+ int err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
return (err);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
return (SET_ERROR(EINVAL));
} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
return (SET_ERROR(EROFS));
} else if (!readonly && decrypt &&
dsl_dir_incompatible_encryption_version(ds->ds_dir)) {
return (SET_ERROR(EROFS));
}
/* if we are decrypting, we can now check MACs in os->os_phys_buf */
if (decrypt && arc_is_unauthenticated((*osp)->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform((*osp)->os_phys_buf, (*osp)->os_spa,
&zb, B_FALSE);
if (err != 0)
return (err);
ASSERT0(arc_is_unauthenticated((*osp)->os_phys_buf));
}
return (0);
}
/*
* dsl_pool must not be held when this is called.
* Upon successful return, there will be a longhold on the dataset,
* and the dsl_pool will not be held.
*/
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_pool_hold(name, FTAG, &dp);
if (err != 0)
return (err);
err = dsl_dataset_own(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, FTAG);
return (err);
}
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
dsl_pool_rele(dp, FTAG);
return (err);
}
/*
* User accounting requires the dataset to be decrypted and rw.
* We also don't begin user accounting during claiming to help
* speed up pool import times and to keep this txg reserved
* completely for recovery work.
*/
if (!readonly && !dp->dp_spa->spa_claiming &&
(ds->ds_dir->dd_crypto_obj == 0 || decrypt)) {
if (dmu_objset_userobjspace_upgradable(*osp) ||
dmu_objset_projectquota_upgradable(*osp)) {
dmu_objset_id_quota_upgrade(*osp);
} else if (dmu_objset_userused_enabled(*osp)) {
dmu_objset_userspace_upgrade(*osp);
}
}
dsl_pool_rele(dp, FTAG);
return (0);
}
int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
err = dsl_dataset_own_obj(dp, obj, flags, tag, &ds);
if (err != 0)
return (err);
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
return (err);
}
return (0);
}
void
dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, void *tag)
{
ds_hold_flags_t flags;
dsl_pool_t *dp = dmu_objset_pool(os);
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag);
dsl_pool_rele(dp, tag);
}
void
dmu_objset_rele(objset_t *os, void *tag)
{
dmu_objset_rele_flags(os, B_FALSE, tag);
}
/*
* When we are called, os MUST refer to an objset associated with a dataset
* that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
* == tag. We will then release and reacquire ownership of the dataset while
* holding the pool config_rwlock so that no intervening namespace or
* ownership changes can occur.
*
* This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
* release the hold on its dataset and acquire a new one on the dataset of the
* same name so that it can be partially torn down and reconstructed.
*/
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
boolean_t decrypt, void *tag)
{
dsl_pool_t *dp;
char name[ZFS_MAX_DATASET_NAME_LEN];
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
VERIFY3P(ds, !=, NULL);
VERIFY3P(ds->ds_owner, ==, tag);
VERIFY(dsl_dataset_long_held(ds));
dsl_dataset_name(ds, name);
dp = ds->ds_dir->dd_pool;
dsl_pool_config_enter(dp, FTAG);
dsl_dataset_disown(ds, flags, tag);
VERIFY0(dsl_dataset_own(dp, name, flags, tag, newds));
dsl_pool_config_exit(dp, FTAG);
}
void
dmu_objset_disown(objset_t *os, boolean_t decrypt, void *tag)
{
ds_hold_flags_t flags;
flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : DS_HOLD_FLAG_NONE;
/*
* Stop upgrading thread
*/
dmu_objset_upgrade_stop(os);
dsl_dataset_disown(os->os_dsl_dataset, flags, tag);
}
void
dmu_objset_evict_dbufs(objset_t *os)
{
dnode_t *dn_marker;
dnode_t *dn;
dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);
mutex_enter(&os->os_lock);
dn = list_head(&os->os_dnodes);
while (dn != NULL) {
/*
* Skip dnodes without holds. We have to do this dance
* because dnode_add_ref() only works if there is already a
* hold. If the dnode has no holds, then it has no dbufs.
*/
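/*
* A marker dnode lets us drop os_lock while evicting this
* dnode's dbufs and then resume the list walk from a stable
* position once the lock is re-acquired.
*/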
if (dnode_add_ref(dn, FTAG)) {
list_insert_after(&os->os_dnodes, dn, dn_marker);
mutex_exit(&os->os_lock);
dnode_evict_dbufs(dn);
dnode_rele(dn, FTAG);
mutex_enter(&os->os_lock);
dn = list_next(&os->os_dnodes, dn_marker);
list_remove(&os->os_dnodes, dn_marker);
} else {
dn = list_next(&os->os_dnodes, dn);
}
}
mutex_exit(&os->os_lock);
kmem_free(dn_marker, sizeof (dnode_t));
if (DMU_USERUSED_DNODE(os) != NULL) {
if (DMU_PROJECTUSED_DNODE(os) != NULL)
dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
}
dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
* Objset eviction processing is split into two pieces.
* The first marks the objset as evicting, evicts any dbufs that
* have a refcount of zero, and then queues up the objset for the
* second phase of eviction. Once os->os_dnodes has been cleared by
* dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
* The second phase closes the special dnodes, dequeues the objset from
* the list of those undergoing eviction, and finally frees the objset.
*
* NOTE: Due to asynchronous eviction processing (invocation of
* dnode_buf_pageout()), it is possible for the meta dnode for the
* objset to have no holds even though os->os_dnodes is not empty.
*/
void
dmu_objset_evict(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!dmu_objset_is_dirty(os, t));
if (ds)
dsl_prop_unregister_all(ds, os);
if (os->os_sa)
sa_tear_down(os);
dmu_objset_evict_dbufs(os);
mutex_enter(&os->os_lock);
spa_evicting_os_register(os->os_spa, os);
if (list_is_empty(&os->os_dnodes)) {
mutex_exit(&os->os_lock);
dmu_objset_evict_done(os);
} else {
mutex_exit(&os->os_lock);
}
}
void
dmu_objset_evict_done(objset_t *os)
{
ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
dnode_special_close(&os->os_meta_dnode);
if (DMU_USERUSED_DNODE(os)) {
if (DMU_PROJECTUSED_DNODE(os))
dnode_special_close(&os->os_projectused_dnode);
dnode_special_close(&os->os_userused_dnode);
dnode_special_close(&os->os_groupused_dnode);
}
zil_free(os->os_zil);
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
/*
* This is a barrier to prevent the objset from going away in
* dnode_move() until we can safely ensure that the objset is still in
* use. We consider the objset valid before the barrier and invalid
* after the barrier.
*/
rw_enter(&os_lock, RW_READER);
rw_exit(&os_lock);
kmem_free(os->os_obj_next_percpu,
os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
mutex_destroy(&os->os_lock);
mutex_destroy(&os->os_userused_lock);
mutex_destroy(&os->os_obj_lock);
mutex_destroy(&os->os_user_ptr_lock);
mutex_destroy(&os->os_upgrade_lock);
for (int i = 0; i < TXG_SIZE; i++)
multilist_destroy(&os->os_dirty_dnodes[i]);
spa_evicting_os_deregister(os->os_spa, os);
kmem_free(os, sizeof (objset_t));
}
inode_timespec_t
dmu_objset_snap_cmtime(objset_t *os)
{
return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
objset_t *
dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, int levels, int blksz, int ibs, dmu_tx_t *tx)
{
objset_t *os;
dnode_t *mdn;
ASSERT(dmu_tx_is_syncing(tx));
if (blksz == 0)
blksz = DNODE_BLOCK_SIZE;
if (ibs == 0)
ibs = DN_MAX_INDBLKSHIFT;
if (ds != NULL)
VERIFY0(dmu_objset_from_ds(ds, &os));
else
VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));
mdn = DMU_META_DNODE(os);
dnode_allocate(mdn, DMU_OT_DNODE, blksz, ibs, DMU_OT_NONE, 0,
DNODE_MIN_SLOTS, tx);
/*
* We don't want to have to increase the meta-dnode's nlevels
* later, because then we could do it in quiescing context while
* we are also accessing it in open context.
*
* This precaution is not necessary for the MOS (ds == NULL),
* because the MOS is only updated in syncing context.
* This is most fortunate: the MOS is the only objset that
* needs to be synced multiple times as spa_sync() iterates
* to convergence, so minimizing its dn_nlevels matters.
*/
if (ds != NULL) {
if (levels == 0) {
levels = 1;
/*
* Determine the number of levels necessary for the
* meta-dnode to contain DN_MAX_OBJECT dnodes. Note
* that in order to ensure that we do not overflow
* 64 bits, there has to be a nlevels that gives us a
* number of blocks > DN_MAX_OBJECT but < 2^64.
* Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)
* (10) must be less than (64 - log2(DN_MAX_OBJECT))
* (16).
*/
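/*
* For example, with typical defaults: 16K dnode blocks hold
* 2^(14 - 9) = 32 dnodes, and each 128K indirect level
* multiplies that by 2^(17 - 7) = 1024, so only a few levels
* are needed to address DN_MAX_OBJECT (2^48) dnodes.
*/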
while ((uint64_t)mdn->dn_nblkptr <<
(mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) *
(mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
DN_MAX_OBJECT)
levels++;
}
mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
mdn->dn_nlevels = levels;
}
ASSERT(type != DMU_OST_NONE);
ASSERT(type != DMU_OST_ANY);
ASSERT(type < DMU_OST_NUMTYPES);
os->os_phys->os_type = type;
/*
* Enable user accounting if it is enabled and this is not an
* encrypted receive.
*/
if (dmu_objset_userused_enabled(os) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os)) {
ds->ds_feature_activation[
SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
}
if (dmu_objset_projectquota_enabled(os)) {
ds->ds_feature_activation[
SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
}
os->os_flags = os->os_phys->os_flags;
}
dsl_dataset_dirty(ds, tx);
return (os);
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, dmu_tx_t *tx)
{
return (dmu_objset_create_impl_dnstats(spa, ds, bp, type, 0, 0, 0, tx));
}
typedef struct dmu_objset_create_arg {
const char *doca_name;
cred_t *doca_cred;
proc_t *doca_proc;
void (*doca_userfunc)(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
void *doca_userarg;
dmu_objset_type_t doca_type;
uint64_t doca_flags;
dsl_crypto_params_t *doca_dcp;
} dmu_objset_create_arg_t;
-/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
dsl_dataset_t *parentds;
objset_t *parentos;
const char *tail;
int error;
if (strchr(doca->doca_name, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
if (dataset_nestcheck(doca->doca_name) != 0)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dmu_objset_create_crypt_check(pdd, doca->doca_dcp, NULL);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred, doca->doca_proc);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* can't create below anything but filesystems (e.g. no ZVOLs) */
error = dsl_dataset_hold_obj(pdd->dd_pool,
dsl_dir_phys(pdd)->dd_head_dataset_obj, FTAG, &parentds);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dmu_objset_from_ds(parentds, &parentos);
if (error != 0) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *ds;
uint64_t obj;
blkptr_t *bp;
objset_t *os;
zio_t *rzio;
VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));
obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
doca->doca_cred, doca->doca_dcp, tx);
VERIFY0(dsl_dataset_hold_obj_flags(pdd->dd_pool, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
bp = dsl_dataset_get_blkptr(ds);
os = dmu_objset_create_impl(spa, ds, bp, doca->doca_type, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (doca->doca_userfunc != NULL) {
doca->doca_userfunc(os, doca->doca_userarg,
doca->doca_cred, tx);
}
/*
* The doca_userfunc() may write out some data that needs to be
* encrypted if the dataset is encrypted (specifically the root
* directory). This data must be written out before the encryption
* key mapping is removed by dsl_dataset_rele_flags(). Force the
* I/O to occur immediately by invoking the relevant sections of
* dsl_pool_sync().
*/
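/*
* The sequence below mirrors dsl_pool_sync(): sync the dataset,
* run the objset sync-done processing, then sync once more to
* push out anything dirtied by that processing (assumed intent;
* see dsl_pool_sync()).
*/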
if (os->os_encrypted) {
dsl_dataset_t *tmpds = NULL;
boolean_t need_sync_done = B_FALSE;
mutex_enter(&ds->ds_lock);
ds->ds_owner = FTAG;
mutex_exit(&ds->ds_lock);
rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dsl_dataset_sync(ds, rzio, tx);
need_sync_done = B_TRUE;
}
VERIFY0(zio_wait(rzio));
dmu_objset_sync_done(os, tx);
taskq_wait(dp->dp_sync_taskq);
if (txg_list_member(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(spa, ds->ds_key_mapping, ds);
}
rzio = zio_root(spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, rzio, tx);
}
VERIFY0(zio_wait(rzio));
if (need_sync_done) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(spa, ds->ds_key_mapping, ds);
dsl_dataset_sync_done(ds, tx);
}
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
}
spa_history_log_internal_ds(ds, "create", tx, " ");
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
dsl_crypto_params_t *dcp, dmu_objset_create_sync_func_t func, void *arg)
{
dmu_objset_create_arg_t doca;
dsl_crypto_params_t tmp_dcp = { 0 };
doca.doca_name = name;
doca.doca_cred = CRED();
doca.doca_proc = curproc;
doca.doca_flags = flags;
doca.doca_userfunc = func;
doca.doca_userarg = arg;
doca.doca_type = type;
/*
* Some callers (mostly for testing) do not provide a dcp on their
* own but various code inside the sync task will require it to be
* allocated. Rather than adding NULL checks throughout this code
* or adding dummy dcp's to all of the callers we simply create a
* dummy one here and use that. This zero dcp will have the same
* effect as asking for inheritance of all encryption params.
*/
doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp;
int rv = dsl_sync_task(name,
dmu_objset_create_check, dmu_objset_create_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL);
if (rv == 0)
zvol_create_minor(name);
return (rv);
}
typedef struct dmu_objset_clone_arg {
const char *doca_clone;
const char *doca_origin;
cred_t *doca_cred;
proc_t *doca_proc;
} dmu_objset_clone_arg_t;
-/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_dir_t *pdd;
const char *tail;
int error;
dsl_dataset_t *origin;
dsl_pool_t *dp = dmu_tx_pool(tx);
if (strchr(doca->doca_clone, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred, doca->doca_proc);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EDQUOT));
}
error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* You can only clone snapshots, not the head datasets. */
if (!origin->ds_is_snapshot) {
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EINVAL));
}
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (0);
}
static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *origin, *ds;
uint64_t obj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));
obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
doca->doca_cred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
dsl_dataset_name(origin, namebuf);
spa_history_log_internal_ds(ds, "clone", tx,
"origin=%s (%llu)", namebuf, (u_longlong_t)origin->ds_object);
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_clone(const char *clone, const char *origin)
{
dmu_objset_clone_arg_t doca;
doca.doca_clone = clone;
doca.doca_origin = origin;
doca.doca_cred = CRED();
doca.doca_proc = curproc;
int rv = dsl_sync_task(clone,
dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL);
if (rv == 0)
zvol_create_minor(clone);
return (rv);
}
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
int err;
char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
nvlist_t *snaps = fnvlist_alloc();
fnvlist_add_boolean(snaps, longsnap);
kmem_strfree(longsnap);
err = dsl_dataset_snapshot(snaps, NULL, NULL);
fnvlist_free(snaps);
return (err);
}
static void
dmu_objset_upgrade_task_cb(void *data)
{
objset_t *os = data;
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = EINTR;
if (!os->os_upgrade_exit) {
int status;
mutex_exit(&os->os_upgrade_lock);
status = os->os_upgrade_cb(os);
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = status;
}
os->os_upgrade_exit = B_TRUE;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
static void
dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb)
{
if (os->os_upgrade_id != 0)
return;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
dsl_dataset_long_hold(dmu_objset_ds(os), upgrade_tag);
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) {
os->os_upgrade_exit = B_FALSE;
os->os_upgrade_cb = cb;
os->os_upgrade_id = taskq_dispatch(
os->os_spa->spa_upgrade_taskq,
dmu_objset_upgrade_task_cb, os, TQ_SLEEP);
if (os->os_upgrade_id == TASKQID_INVALID) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
os->os_upgrade_status = ENOMEM;
}
} else {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
mutex_exit(&os->os_upgrade_lock);
}
static void
dmu_objset_upgrade_stop(objset_t *os)
{
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_exit = B_TRUE;
if (os->os_upgrade_id != 0) {
taskqid_t id = os->os_upgrade_id;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
} else {
mutex_exit(&os->os_upgrade_lock);
}
}
static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
ASSERT(dn->dn_dbuf->db_data_pending);
/*
* Initialize dn_zio outside dnode_sync() because the
* meta-dnode needs to set it outside dnode_sync().
*/
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
ASSERT(dn->dn_zio);
ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
multilist_sublist_remove(list, dn);
/*
* See the comment above dnode_rele_task() for an explanation
* of why this dnode hold is always needed (even when not
* doing user accounting).
*/
multilist_t *newlist = &dn->dn_objset->os_synced_dnodes;
(void) dnode_add_ref(dn, newlist);
multilist_insert(newlist, dn);
dnode_sync(dn, tx);
}
}
-/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
+ (void) abuf;
blkptr_t *bp = zio->io_bp;
objset_t *os = arg;
dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
uint64_t fill = 0;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
ASSERT0(BP_GET_LEVEL(bp));
/*
* Update rootbp fill count: it should be the number of objects
* allocated in the object set (not counting the "special"
* objects that are stored in the objset_phys_t -- the meta
* dnode and user/group/project accounting objects).
*/
for (int i = 0; i < dnp->dn_nblkptr; i++)
fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
BP_SET_FILL(bp, fill);
if (os->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
*os->os_rootbp = *bp;
if (os->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}
-/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
+ (void) abuf;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
objset_t *os = arg;
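/*
* If the objset root block was rewritten in place, the blkptr is
* unchanged and no accounting is needed; otherwise kill (free)
* the old root block and account for the newly born one.
*/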
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
dmu_tx_t *tx = os->os_synctx;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
kmem_free(bp, sizeof (*bp));
}
typedef struct sync_dnodes_arg {
multilist_t *sda_list;
int sda_sublist_idx;
multilist_t *sda_newlist;
dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;
static void
sync_dnodes_task(void *arg)
{
sync_dnodes_arg_t *sda = arg;
multilist_sublist_t *ms =
multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);
dmu_objset_sync_dnodes(ms, sda->sda_tx);
multilist_sublist_unlock(ms);
kmem_free(sda, sizeof (*sda));
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
int txgoff;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *zio;
list_t *list;
dbuf_dirty_record_t *dr;
int num_sublists;
multilist_t *ml;
blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
*blkptr_copy = *os->os_rootbp;
dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", (u_longlong_t)tx->tx_txg);
ASSERT(dmu_tx_is_syncing(tx));
/* XXX the write_done callback should really give us the tx... */
os->os_synctx = tx;
if (os->os_dsl_dataset == NULL) {
/*
* This is the MOS. If we have upgraded,
* spa_max_replication() could change, so reset
* os_copies here.
*/
os->os_copies = spa_max_replication(os->os_spa);
}
/*
* Create the root block IO
*/
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
arc_release(os->os_phys_buf, &os->os_phys_buf);
dmu_write_policy(os, NULL, 0, 0, &zp);
/*
* If we are either claiming the ZIL or doing a raw receive, write
* out the os_phys_buf raw. Neither of these actions will affect the
* MAC at this point.
*/
if (os->os_raw_receive ||
os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT(os->os_encrypted);
arc_convert_to_raw(os->os_phys_buf,
os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER,
DMU_OT_OBJSET, NULL, NULL, NULL);
}
zio = arc_write(pio, os->os_spa, tx->tx_txg,
blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
&zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
/*
* Sync special dnodes - the parent IO for the sync is the root block
*/
DMU_META_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_META_DNODE(os), tx);
os->os_phys->os_flags = os->os_flags;
if (DMU_USERUSED_DNODE(os) &&
DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_USERUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_USERUSED_DNODE(os), tx);
DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
if (DMU_PROJECTUSED_DNODE(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_PROJECTUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_PROJECTUSED_DNODE(os), tx);
}
txgoff = tx->tx_txg & TXG_MASK;
/*
* We must create the list here because it uses the
* dn_dirty_link[] of this txg. But it may already
* exist because we call dsl_dataset_sync() twice per txg.
*/
if (os->os_synced_dnodes.ml_sublists == NULL) {
multilist_create(&os->os_synced_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[txgoff]),
dnode_multilist_index_func);
} else {
ASSERT3U(os->os_synced_dnodes.ml_offset, ==,
offsetof(dnode_t, dn_dirty_link[txgoff]));
}
ml = &os->os_dirty_dnodes[txgoff];
num_sublists = multilist_get_num_sublists(ml);
for (int i = 0; i < num_sublists; i++) {
if (multilist_sublist_is_empty_idx(ml, i))
continue;
sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
sda->sda_list = ml;
sda->sda_sublist_idx = i;
sda->sda_tx = tx;
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
sync_dnodes_task, sda, 0);
/* callback frees sda */
}
taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
while ((dr = list_head(list)) != NULL) {
ASSERT0(dr->dr_dbuf->db_level);
list_remove(list, dr);
zio_nowait(dr->dr_zio);
}
/* Enable dnode backfill if enough objects have been freed. */
if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
os->os_rescan_dnodes = B_TRUE;
os->os_freed_dnodes = 0;
}
/*
* Free intent log blocks up to this tx.
*/
zil_sync(os->os_zil, tx);
os->os_phys->os_zil_header = os->os_zil_header;
zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
return (!multilist_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]));
}
static file_info_cb_t *file_cbs[DMU_OST_NUMTYPES];
void
dmu_objset_register_type(dmu_objset_type_t ost, file_info_cb_t *cb)
{
file_cbs[ost] = cb;
}
int
dmu_get_file_info(objset_t *os, dmu_object_type_t bonustype, const void *data,
zfs_file_info_t *zfi)
{
file_info_cb_t *cb = file_cbs[os->os_phys->os_type];
if (cb == NULL)
return (EINVAL);
return (cb(bonustype, data, zfi));
}
boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
file_cbs[os->os_phys->os_type] != NULL &&
DMU_USERUSED_DNODE(os) != NULL);
}
boolean_t
dmu_objset_userobjused_enabled(objset_t *os)
{
return (dmu_objset_userused_enabled(os) &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}
boolean_t
dmu_objset_projectquota_enabled(objset_t *os)
{
return (file_cbs[os->os_phys->os_type] != NULL &&
DMU_PROJECTUSED_DNODE(os) != NULL &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
}
typedef struct userquota_node {
/* must be the first field; see userquota_update_cache() */
char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
int64_t uqn_delta;
avl_node_t uqn_node;
} userquota_node_t;
typedef struct userquota_cache {
avl_tree_t uqc_user_deltas;
avl_tree_t uqc_group_deltas;
avl_tree_t uqc_project_deltas;
} userquota_cache_t;
static int
userquota_compare(const void *l, const void *r)
{
const userquota_node_t *luqn = l;
const userquota_node_t *ruqn = r;
int rv;
/*
* NB: can only access uqn_id because userquota_update_cache() doesn't
* pass in an entire userquota_node_t.
*/
rv = strcmp(luqn->uqn_id, ruqn->uqn_id);
return (TREE_ISIGN(rv));
}
static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
void *cookie;
userquota_node_t *uqn;
ASSERT(dmu_tx_is_syncing(tx));
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
&cookie)) != NULL) {
/*
* os_userused_lock protects against concurrent calls to
* zap_increment_int(). It's needed because zap_increment_int()
* is not thread-safe (i.e. not atomic).
*/
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_user_deltas);
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_group_deltas);
if (dmu_objset_projectquota_enabled(os)) {
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_project_deltas);
}
}
static void
userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta)
{
userquota_node_t *uqn;
avl_index_t idx;
ASSERT(strlen(id) < sizeof (uqn->uqn_id));
/*
* Use id directly for searching because uqn_id is the first field of
* userquota_node_t and fields after uqn_id won't be accessed in
* avl_find().
*/
uqn = avl_find(avl, (const void *)id, &idx);
if (uqn == NULL) {
uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id));
avl_insert(avl, uqn, idx);
}
uqn->uqn_delta += delta;
}
static void
do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
uint64_t flags, uint64_t user, uint64_t group, uint64_t project,
boolean_t subtract)
{
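/*
* Space charged to each id includes the dnode itself
* (DNODE_MIN_SIZE) plus the bytes referenced by the object;
* deltas are keyed in the ZAP by the id rendered as a hex
* string.
*/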
if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) {
int64_t delta = DNODE_MIN_SIZE + used;
char name[20];
if (subtract)
delta = -delta;
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name), "%llx",
(longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
static void
do_userobjquota_update(objset_t *os, userquota_cache_t *cache, uint64_t flags,
uint64_t user, uint64_t group, uint64_t project, boolean_t subtract)
{
if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) {
char name[20 + DMU_OBJACCT_PREFIX_LEN];
int delta = subtract ? -1 : 1;
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name),
DMU_OBJACCT_PREFIX "%llx", (longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
typedef struct userquota_updates_arg {
objset_t *uua_os;
int uua_sublist_idx;
dmu_tx_t *uua_tx;
} userquota_updates_arg_t;
static void
userquota_updates_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
dmu_tx_t *tx = uua->uua_tx;
dnode_t *dn;
userquota_cache_t cache = { { 0 } };
multilist_sublist_t *list =
multilist_sublist_lock(&os->os_synced_dnodes, uua->uua_sublist_idx);
ASSERT(multilist_sublist_head(list) == NULL ||
dmu_objset_userused_enabled(os));
avl_create(&cache.uqc_user_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
avl_create(&cache.uqc_group_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
if (dmu_objset_projectquota_enabled(os))
avl_create(&cache.uqc_project_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t,
uqn_node));
while ((dn = multilist_sublist_head(list)) != NULL) {
int flags;
ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
dn->dn_phys->dn_flags &
DNODE_FLAG_USERUSED_ACCOUNTED);
flags = dn->dn_id_flags;
ASSERT(flags);
if (flags & DN_ID_OLD_EXIST) {
do_userquota_update(os, &cache, dn->dn_oldused,
dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
do_userobjquota_update(os, &cache, dn->dn_oldflags,
dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
}
if (flags & DN_ID_NEW_EXIST) {
do_userquota_update(os, &cache,
DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags,
dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
do_userobjquota_update(os, &cache,
dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
}
mutex_enter(&dn->dn_mtx);
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
dn->dn_olduid = dn->dn_newuid;
dn->dn_oldgid = dn->dn_newgid;
dn->dn_oldprojid = dn->dn_newprojid;
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (dn->dn_bonuslen == 0)
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
else
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
do_userquota_cacheflush(os, &cache, tx);
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
/*
* Release dnode holds from dmu_objset_sync_dnodes(). When the dnode is being
* synced (i.e. we have issued the zio's for blocks in the dnode), it can't be
* evicted because the block containing the dnode can't be evicted until it is
* written out. However, this hold is necessary to prevent the dnode_t from
* being moved (via dnode_move()) while it's still referenced by
* dbuf_dirty_record_t:dr_dnode. And dr_dnode is needed for
* dirty_lightweight_leaf-type dirty records.
*
* If we are doing user-object accounting, the dnode_rele() happens from
* userquota_updates_task() instead.
*/
static void
dnode_rele_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
multilist_sublist_t *list =
multilist_sublist_lock(&os->os_synced_dnodes, uua->uua_sublist_idx);
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
multilist_sublist_remove(list, dn);
dnode_rele(dn, &os->os_synced_dnodes);
}
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
/*
* Return TRUE if userquota updates are needed.
*/
static boolean_t
dmu_objset_do_userquota_updates_prep(objset_t *os, dmu_tx_t *tx)
{
if (!dmu_objset_userused_enabled(os))
return (B_FALSE);
/*
* If this is a raw receive just return and handle accounting
* later when we have the keys loaded. We also don't do user
* accounting during claiming since the datasets are not owned
* for the duration of claiming and this txg should only be
* used for recovery.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return (B_FALSE);
if (tx->tx_txg <= os->os_spa->spa_claim_max_txg)
return (B_FALSE);
/* Allocate the user/group/project used objects if necessary. */
if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os,
DMU_USERUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
VERIFY0(zap_create_claim(os,
DMU_GROUPUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
if (dmu_objset_projectquota_enabled(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
return (B_TRUE);
}
/*
* Dispatch taskq tasks to dp_sync_taskq to update the user accounting, and
* also release the holds on the dnodes from dmu_objset_sync_dnodes().
* The caller must taskq_wait(dp_sync_taskq).
*/
void
dmu_objset_sync_done(objset_t *os, dmu_tx_t *tx)
{
boolean_t need_userquota = dmu_objset_do_userquota_updates_prep(os, tx);
int num_sublists = multilist_get_num_sublists(&os->os_synced_dnodes);
for (int i = 0; i < num_sublists; i++) {
userquota_updates_arg_t *uua =
kmem_alloc(sizeof (*uua), KM_SLEEP);
uua->uua_os = os;
uua->uua_sublist_idx = i;
uua->uua_tx = tx;
/*
* If we don't need to update userquotas, use
* dnode_rele_task() to call dnode_rele()
*/
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
need_userquota ? userquota_updates_task : dnode_rele_task,
uua, 0);
/* callback frees uua */
}
}
/*
* Returns a pointer to data to find uid/gid from
*
* If a dirty record for the transaction group that is syncing can't
* be found then NULL is returned. In the NULL case it is assumed
* the uid/gid aren't changing.
*/
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
void *data;
if (db->db_dirtycnt == 0)
return (db->db.db_data); /* Nothing is changing */
dr = dbuf_find_dirty_eq(db, tx->tx_txg);
if (dr == NULL) {
data = NULL;
} else {
if (dr->dr_dnode->dn_bonuslen == 0 &&
dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
data = dr->dt.dl.dr_data->b_data;
else
data = dr->dt.dl.dr_data;
}
return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
void *data = NULL;
dmu_buf_impl_t *db = NULL;
int flags = dn->dn_id_flags;
int error;
boolean_t have_spill = B_FALSE;
if (!dmu_objset_userused_enabled(dn->dn_objset))
return;
/*
* Raw receives introduce a problem with user accounting. Raw
* receives cannot update the user accounting info because the
* user ids and the sizes are encrypted. To guarantee that we
* never end up with bad user accounting, we simply disable it
* during raw receives. We also disable this for normal receives
* so that an incremental raw receive may be done on top of an
* existing non-raw receive.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return;
if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
DN_ID_CHKED_SPILL)))
return;
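/*
* Locate the data holding the ids: the on-disk bonus buffer for
* the "before" pass, the in-memory dirty bonus (if any) for the
* "after" pass, or the spill block when the bonus is an SA
* with no bonus payload.
*/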
if (before && dn->dn_bonuslen != 0)
data = DN_BONUS(dn->dn_phys);
else if (!before && dn->dn_bonuslen != 0) {
if (dn->dn_bonus) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
data = dmu_objset_userquota_find_data(db, tx);
} else {
data = DN_BONUS(dn->dn_phys);
}
} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
int rf = 0;
if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
rf |= DB_RF_HAVESTRUCT;
error = dmu_spill_hold_by_dnode(dn,
rf | DB_RF_MUST_SUCCEED,
FTAG, (dmu_buf_t **)&db);
ASSERT(error == 0);
mutex_enter(&db->db_mtx);
data = (before) ? db->db.db_data :
dmu_objset_userquota_find_data(db, tx);
have_spill = B_TRUE;
} else {
mutex_enter(&dn->dn_mtx);
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
mutex_exit(&dn->dn_mtx);
return;
}
/*
* Must always call the callback in case the object
* type has changed and that type isn't an object type to track
*/
zfs_file_info_t zfi;
error = file_cbs[os->os_phys->os_type](dn->dn_bonustype, data, &zfi);
if (before) {
ASSERT(data);
dn->dn_olduid = zfi.zfi_user;
dn->dn_oldgid = zfi.zfi_group;
dn->dn_oldprojid = zfi.zfi_project;
} else if (data) {
dn->dn_newuid = zfi.zfi_user;
dn->dn_newgid = zfi.zfi_group;
dn->dn_newprojid = zfi.zfi_project;
}
/*
* Preserve existing uid/gid when the callback can't determine
* what the new uid/gid are and the callback returned EEXIST.
* The EEXIST error tells us to just use the existing uid/gid.
* If we don't know what the old values are then just assign
* them to 0, since that is a new file being created.
*/
if (!before && data == NULL && error == EEXIST) {
if (flags & DN_ID_OLD_EXIST) {
dn->dn_newuid = dn->dn_olduid;
dn->dn_newgid = dn->dn_oldgid;
dn->dn_newprojid = dn->dn_oldprojid;
} else {
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
}
error = 0;
}
if (db)
mutex_exit(&db->db_mtx);
mutex_enter(&dn->dn_mtx);
if (error == 0 && before)
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (error == 0 && !before)
dn->dn_id_flags |= DN_ID_NEW_EXIST;
if (have_spill) {
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
} else {
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
mutex_exit(&dn->dn_mtx);
if (have_spill)
dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_userobjspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_projectquota_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
}
static int
dmu_objset_space_upgrade(objset_t *os)
{
uint64_t obj;
int err = 0;
/*
* We simply need to mark every object dirty, so that it will be
* synced out and thus accounted for. If this is called
* concurrently, or if we already did some work before crashing,
* that's fine, since we track each object's accounted state
* independently.
*/
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
dmu_tx_t *tx;
dmu_buf_t *db;
int objerr;
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_exit)
err = SET_ERROR(EINTR);
mutex_exit(&os->os_upgrade_lock);
if (err != 0)
return (err);
if (issig(JUSTLOOKING) && issig(FORREAL))
return (SET_ERROR(EINTR));
objerr = dmu_bonus_hold(os, obj, FTAG, &db);
if (objerr != 0)
continue;
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, obj);
objerr = dmu_tx_assign(tx, TXG_WAIT);
if (objerr != 0) {
dmu_buf_rele(db, FTAG);
dmu_tx_abort(tx);
continue;
}
dmu_buf_will_dirty(db, tx);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
}
return (0);
}
static int
dmu_objset_userspace_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userspace_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_userspace_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_userspace_upgrade_cb);
}
static int
dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userobjspace_present(os) &&
dmu_objset_projectquota_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
if (!dmu_objset_projectquota_enabled(os) &&
dmu_objset_userobjspace_present(os))
return (SET_ERROR(ENOTSUP));
if (dmu_objset_userobjused_enabled(os))
dmu_objset_ds(os)->ds_feature_activation[
SPA_FEATURE_USEROBJ_ACCOUNTING] = (void *)B_TRUE;
if (dmu_objset_projectquota_enabled(os))
dmu_objset_ds(os)->ds_feature_activation[
SPA_FEATURE_PROJECT_QUOTA] = (void *)B_TRUE;
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os))
os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
if (dmu_objset_projectquota_enabled(os))
os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_id_quota_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}
boolean_t
dmu_objset_userobjspace_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_userobjused_enabled(os) &&
!dmu_objset_userobjspace_present(os) &&
spa_writeable(dmu_objset_spa(os)));
}
boolean_t
dmu_objset_projectquota_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_projectquota_enabled(os) &&
!dmu_objset_projectquota_present(os) &&
spa_writeable(dmu_objset_spa(os)));
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
usedobjsp, availobjsp);
}
uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}
void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
stat->dds_type = os->os_phys->os_type;
if (os->os_dsl_dataset)
dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
ASSERT(os->os_dsl_dataset ||
os->os_phys->os_type == DMU_OST_META);
if (os->os_dsl_dataset != NULL)
dsl_dataset_stats(os->os_dsl_dataset, nv);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
os->os_phys->os_type);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
dmu_objset_userspace_present(os));
}
int
dmu_objset_is_snapshot(objset_t *os)
{
if (os->os_dsl_dataset != NULL)
return (os->os_dsl_dataset->ds_is_snapshot);
else
return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, const char *name, char *real, int maxlen,
boolean_t *conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
uint64_t ignored;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
MT_NORMALIZE, real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
zap_cursor_t cursor;
zap_attribute_t attr;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
if (idp)
*idp = attr.za_first_integer;
if (case_conflict)
*case_conflict = attr.za_normalization_conflict;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp)
{
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
zap_cursor_t cursor;
zap_attribute_t attr;
/* there is no next dir on a snapshot! */
if (os->os_dsl_dataset->ds_object !=
dsl_dir_phys(dd)->dd_head_dataset_obj)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
dd->dd_pool->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strlcpy(name, attr.za_name, namelen);
if (idp)
*idp = attr.za_first_integer;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
typedef struct dmu_objset_find_ctx {
taskq_t *dc_tq;
dsl_pool_t *dc_dp;
uint64_t dc_ddobj;
char *dc_ddname; /* last component of ddobj's name */
int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
void *dc_arg;
int dc_flags;
kmutex_t *dc_error_lock;
int *dc_error;
} dmu_objset_find_ctx_t;
static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
dsl_pool_t *dp = dcp->dc_dp;
dsl_dir_t *dd;
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
uint64_t thisobj;
int err = 0;
/* don't process if there already was an error */
if (*dcp->dc_error != 0)
goto out;
/*
* Note: passing the name (dc_ddname) here is optional, but it
* improves performance because we don't need to call
* zap_value_search() to determine the name.
*/
err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
if (err != 0)
goto out;
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
goto out;
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (dcp->dc_flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
dmu_objset_find_ctx_t *child_dcp =
kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
*child_dcp = *dcp;
child_dcp->dc_ddobj = attr->za_first_integer;
child_dcp->dc_ddname = spa_strdup(attr->za_name);
if (dcp->dc_tq != NULL)
(void) taskq_dispatch(dcp->dc_tq,
dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
else
dmu_objset_find_dp_impl(child_dcp);
}
zap_cursor_fini(&zc);
}
/*
* Iterate over all snapshots.
*/
if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
dsl_dataset_t *ds;
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
err = dsl_dataset_hold_obj(dp,
attr->za_first_integer, FTAG, &ds);
if (err != 0)
break;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
kmem_free(attr, sizeof (zap_attribute_t));
if (err != 0) {
dsl_dir_rele(dd, FTAG);
goto out;
}
/*
* Apply to self.
*/
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
/*
* Note: we hold the dir while calling dsl_dataset_hold_obj() so
* that the dir will remain cached, and we won't have to re-instantiate
* it (which could be expensive due to finding its name via
* zap_value_search()).
*/
dsl_dir_rele(dd, FTAG);
if (err != 0)
goto out;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
out:
if (err != 0) {
mutex_enter(dcp->dc_error_lock);
/* only keep first error */
if (*dcp->dc_error == 0)
*dcp->dc_error = err;
mutex_exit(dcp->dc_error_lock);
}
if (dcp->dc_ddname != NULL)
spa_strfree(dcp->dc_ddname);
kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
dmu_objset_find_ctx_t *dcp = arg;
dsl_pool_t *dp = dcp->dc_dp;
/*
* We need to get a pool_config_lock here, as there are several
* assert(pool_config_held) down the stack. Getting a lock via
* dsl_pool_config_enter is risky, as it might be stalled by a
* pending writer. This would deadlock, as the write lock can
* only be granted when our parent thread gives up the lock.
* The _prio interface gives us priority over a pending writer.
*/
dsl_pool_config_enter_prio(dp, FTAG);
dmu_objset_find_dp_impl(dcp);
dsl_pool_config_exit(dp, FTAG);
}
/*
* Find objsets under and including ddobj, call func(ds) on each.
* The order for the enumeration is completely undefined.
* func is called with dsl_pool_config held.
*/
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
int error = 0;
taskq_t *tq = NULL;
int ntasks;
dmu_objset_find_ctx_t *dcp;
kmutex_t err_lock;
mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
dcp->dc_tq = NULL;
dcp->dc_dp = dp;
dcp->dc_ddobj = ddobj;
dcp->dc_ddname = NULL;
dcp->dc_func = func;
dcp->dc_arg = arg;
dcp->dc_flags = flags;
dcp->dc_error_lock = &err_lock;
dcp->dc_error = &error;
if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
/*
* In case a write lock is held we can't make use of
* parallelism, as down the stack of the worker threads
* the lock is asserted via dsl_pool_config_held.
* In case of a read lock this is solved by getting a read
* lock in each worker thread, which isn't possible in case
* of a writer lock. So we fall back to the synchronous path
* here.
* In the future it might be possible to get some magic into
* dsl_pool_config_held in a way that it returns true for
* the worker threads so that a single lock held from this
* thread suffices. For now, stay single threaded.
*/
dmu_objset_find_dp_impl(dcp);
mutex_destroy(&err_lock);
return (error);
}
ntasks = dmu_find_threads;
if (ntasks == 0)
ntasks = vdev_count_leaves(dp->dp_spa) * 4;
tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks,
INT_MAX, 0);
if (tq == NULL) {
kmem_free(dcp, sizeof (*dcp));
mutex_destroy(&err_lock);
return (SET_ERROR(ENOMEM));
}
dcp->dc_tq = tq;
/* dcp will be freed by task */
(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
/*
* PORTING: this code relies on the property of taskq_wait to wait
* until no more tasks are queued and no more tasks are active. As
* we always queue new tasks from within other tasks, taskq_wait
* reliably waits for the full recursion to finish, even though we
* enqueue new tasks after taskq_wait has been called.
* On platforms other than illumos, taskq_wait may not have this
* property.
*/
taskq_wait(tq);
taskq_destroy(tq);
mutex_destroy(&err_lock);
return (error);
}
/*
* Find all objsets under name, and for each, call 'func(child_name, arg)'.
* The dp_config_rwlock must not be held when this is called, and it
* will not be held when the callback is called.
* Therefore this function should only be used when the pool is not changing
* (e.g. in syncing context), or the callback can deal with the possible races.
*/
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
int func(const char *, void *), void *arg, int flags)
{
dsl_dir_t *dd;
dsl_pool_t *dp = spa_get_dsl(spa);
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
char *child;
uint64_t thisobj;
int err;
dsl_pool_config_enter(dp, FTAG);
err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
return (err);
}
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
return (0);
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s/%s", name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = dmu_objset_find_impl(spa, child,
func, arg, flags);
dsl_pool_config_enter(dp, FTAG);
kmem_strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
if (err != 0) {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
return (err);
}
}
/*
* Iterate over all snapshots.
*/
if (flags & DS_FIND_SNAPSHOTS) {
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s@%s",
name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = func(child, arg);
dsl_pool_config_enter(dp, FTAG);
kmem_strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
dsl_dir_rele(dd, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
dsl_pool_config_exit(dp, FTAG);
if (err != 0)
return (err);
/* Apply to self. */
return (func(name, arg));
}
/*
* See comment above dmu_objset_find_impl().
*/
int
dmu_objset_find(const char *name, int func(const char *, void *), void *arg,
int flags)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, FTAG);
if (error != 0)
return (error);
error = dmu_objset_find_impl(spa, name, func, arg, flags);
spa_close(spa, FTAG);
return (error);
}
boolean_t
dmu_objset_incompatible_encryption_version(objset_t *os)
{
return (dsl_dir_incompatible_encryption_version(
os->os_dsl_dataset->ds_dir));
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
os->os_user_ptr = user_ptr;
}
void *
dmu_objset_get_user(objset_t *os)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
return (os->os_user_ptr);
}
/*
* Determine name of filesystem, given name of snapshot.
* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
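* e.g. given "pool/fs@snap", buf is set to "pool/fs".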
*/
int
dmu_fsname(const char *snapname, char *buf)
{
char *atp = strchr(snapname, '@');
if (atp == NULL)
return (SET_ERROR(EINVAL));
if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(buf, snapname, atp - snapname + 1);
return (0);
}
/*
* Call when we think we're going to write/free space in open context
* to track the amount of dirty data in the open txg, which is also the
* amount of memory that can not be evicted until this txg syncs.
*
* Note that there are two conditions where this can be called from
* syncing context:
*
* [1] When we just created the dataset, in which case we go on with
* updating any accounting of dirty data as usual.
* [2] When we are dirtying MOS data, in which case we only update the
* pool's accounting of dirty data.
*/
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);
if (ds != NULL) {
dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
}
dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_hold_flags);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_rele_flags);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);
EXPORT_SYMBOL(dmu_objset_dnodesize);
EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl_dnstats);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_sync_done);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
EXPORT_SYMBOL(dmu_objset_projectquota_present);
EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dmu_recv.c b/sys/contrib/openzfs/module/zfs/dmu_recv.c
index 0ec46bdb4f47..99eeceeb4139 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_recv.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_recv.c
@@ -1,3401 +1,3409 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zvol.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
#include <sys/zfs_file.h>
int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
int zfs_recv_queue_ff = 20;
int zfs_recv_write_batch_size = 1024 * 1024;
static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
void *buf);
struct receive_record_arg {
dmu_replay_record_t header;
void *payload; /* Pointer to a buffer containing the payload */
/*
* If the record is a WRITE or SPILL, pointer to the abd containing the
* payload.
*/
abd_t *abd;
int payload_size;
uint64_t bytes_read; /* bytes read from stream when record created */
boolean_t eos_marker; /* Marks the end of the stream */
bqueue_node_t node;
};
struct receive_writer_arg {
objset_t *os;
boolean_t byteswap;
bqueue_t q;
/*
* These three members are used to signal to the main thread when
* we're done.
*/
kmutex_t mutex;
kcondvar_t cv;
boolean_t done;
int err;
boolean_t resumable;
boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
boolean_t full; /* this is a full send stream */
uint64_t last_object;
uint64_t last_offset;
uint64_t max_object; /* highest object ID referenced in stream */
uint64_t bytes_read; /* bytes read when current record created */
list_t write_batch;
/* Encryption parameters for the last received DRR_OBJECT_RANGE */
boolean_t or_crypt_params_present;
uint64_t or_firstobj;
uint64_t or_numslots;
uint8_t or_salt[ZIO_DATA_SALT_LEN];
uint8_t or_iv[ZIO_DATA_IV_LEN];
uint8_t or_mac[ZIO_DATA_MAC_LEN];
boolean_t or_byteorder;
};
typedef struct dmu_recv_begin_arg {
const char *drba_origin;
dmu_recv_cookie_t *drba_cookie;
cred_t *drba_cred;
proc_t *drba_proc;
dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
drr->drr_type = BSWAP_32(drr->drr_type);
drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
switch (drr->drr_type) {
case DRR_BEGIN:
DO64(drr_begin.drr_magic);
DO64(drr_begin.drr_versioninfo);
DO64(drr_begin.drr_creation_time);
DO32(drr_begin.drr_type);
DO32(drr_begin.drr_flags);
DO64(drr_begin.drr_toguid);
DO64(drr_begin.drr_fromguid);
break;
case DRR_OBJECT:
DO64(drr_object.drr_object);
DO32(drr_object.drr_type);
DO32(drr_object.drr_bonustype);
DO32(drr_object.drr_blksz);
DO32(drr_object.drr_bonuslen);
DO32(drr_object.drr_raw_bonuslen);
DO64(drr_object.drr_toguid);
DO64(drr_object.drr_maxblkid);
break;
case DRR_FREEOBJECTS:
DO64(drr_freeobjects.drr_firstobj);
DO64(drr_freeobjects.drr_numobjs);
DO64(drr_freeobjects.drr_toguid);
break;
case DRR_WRITE:
DO64(drr_write.drr_object);
DO32(drr_write.drr_type);
DO64(drr_write.drr_offset);
DO64(drr_write.drr_logical_size);
DO64(drr_write.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
DO64(drr_write.drr_key.ddk_prop);
DO64(drr_write.drr_compressed_size);
break;
case DRR_WRITE_EMBEDDED:
DO64(drr_write_embedded.drr_object);
DO64(drr_write_embedded.drr_offset);
DO64(drr_write_embedded.drr_length);
DO64(drr_write_embedded.drr_toguid);
DO32(drr_write_embedded.drr_lsize);
DO32(drr_write_embedded.drr_psize);
break;
case DRR_FREE:
DO64(drr_free.drr_object);
DO64(drr_free.drr_offset);
DO64(drr_free.drr_length);
DO64(drr_free.drr_toguid);
break;
case DRR_SPILL:
DO64(drr_spill.drr_object);
DO64(drr_spill.drr_length);
DO64(drr_spill.drr_toguid);
DO64(drr_spill.drr_compressed_size);
DO32(drr_spill.drr_type);
break;
case DRR_OBJECT_RANGE:
DO64(drr_object_range.drr_firstobj);
DO64(drr_object_range.drr_numslots);
DO64(drr_object_range.drr_toguid);
break;
case DRR_REDACT:
DO64(drr_redact.drr_object);
DO64(drr_redact.drr_offset);
DO64(drr_redact.drr_length);
DO64(drr_redact.drr_toguid);
break;
case DRR_END:
DO64(drr_end.drr_toguid);
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
break;
default:
break;
}
if (drr->drr_type != DRR_BEGIN) {
ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
}
#undef DO64
#undef DO32
}
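/*
 * Minimal sketch (kept out of the build with #if 0) of how a receiver can
 * tell whether byteswap_record() is needed at all: compare the DRR_BEGIN
 * magic against the native and byte-swapped forms of DMU_BACKUP_MAGIC,
 * which is what dmu_recv_begin() does further down.  The helper name is
 * illustrative only and is not part of this module.
 */
#if 0
static boolean_t
stream_needs_byteswap(const dmu_replay_record_t *drr)
{
        uint64_t magic = drr->drr_u.drr_begin.drr_magic;

        if (magic == DMU_BACKUP_MAGIC)
                return (B_FALSE);       /* sender had our byte order */
        if (magic == BSWAP_64(DMU_BACKUP_MAGIC))
                return (B_TRUE);        /* sender had the opposite byte order */
        return (B_FALSE);               /* not a valid DRR_BEGIN record */
}
#endif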
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check that the new stream we're trying to receive is redacted with respect to
* a subset of the snapshots that the origin was redacted with respect to. For
* the reasons behind this, see the man page on redacted zfs sends and receives.
*/
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
/*
* Short circuit the comparison; if we are redacted with respect to
* more snapshots than the origin, we can't be redacted with respect
* to a subset.
*/
if (num_redact_snaps > origin_num_snaps) {
return (B_FALSE);
}
for (int i = 0; i < num_redact_snaps; i++) {
if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
redact_snaps[i])) {
return (B_FALSE);
}
}
return (B_TRUE);
}
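/*
 * Illustrative example (not compiled), using made-up GUID values, of what
 * the subset check above accepts and rejects.
 */
#if 0
static void
compatible_redact_snaps_example(void)
{
        uint64_t origin_snaps[] = { 0x1111, 0x2222, 0x3333 };
        uint64_t subset[] = { 0x2222 };
        uint64_t not_subset[] = { 0x2222, 0x4444 };

        /* B_TRUE: every GUID in "subset" is also an origin redaction snap. */
        VERIFY(compatible_redact_snaps(origin_snaps, 3, subset, 1));

        /* B_FALSE: 0x4444 is not among the origin's redaction snapshots. */
        VERIFY(!compatible_redact_snaps(origin_snaps, 3, not_subset, 2));
}
#endif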
static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
uint64_t *origin_snaps;
uint64_t origin_num_snaps;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
int err = 0;
boolean_t ret = B_TRUE;
uint64_t *redact_snaps;
uint_t numredactsnaps;
/*
* If this is a full send stream, we're safe no matter what.
*/
if (drrb->drr_fromguid == 0)
return (ret);
VERIFY(dsl_dataset_get_uint64_array_feature(origin,
SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
0) {
/*
* If the send stream was sent from the redaction bookmark or
* the redacted version of the dataset, then we're safe. Verify
* that this is from a compatible redaction bookmark or
* redacted dataset.
*/
if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
/*
* If the stream is redacted, it must be redacted with respect
* to a subset of what the origin is redacted with respect to.
* See case number 2 in the zfs man page section on redacted zfs
* send.
*/
err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);
if (err != 0 || !compatible_redact_snaps(origin_snaps,
origin_num_snaps, redact_snaps, numredactsnaps)) {
err = EINVAL;
}
} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
drrb->drr_toguid)) {
/*
* If the stream isn't redacted but the origin is, this must be
* one of the snapshots the origin is redacted with respect to.
* See case number 1 in the zfs man page section on redacted zfs
* send.
*/
err = EINVAL;
}
if (err != 0)
ret = B_FALSE;
return (ret);
}
/*
* If we previously received a stream with --large-block, we don't support
* receiving an incremental on top of it without --large-block. This avoids
* forcing a read-modify-write or trying to re-aggregate a string of WRITE
* records.
*/
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
!(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
return (0);
}
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
uint64_t fromguid, uint64_t featureflags)
{
uint64_t val;
uint64_t children;
int error;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;
/* Temporary clone name must not exist. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
8, 1, &val);
if (error != ENOENT)
return (error == 0 ? SET_ERROR(EBUSY) : error);
/* Resume state must not be set. */
if (dsl_dataset_has_resume_receive_state(ds))
return (SET_ERROR(EBUSY));
/* New snapshot name must not exist. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj,
drba->drba_cookie->drc_tosnap, 8, 1, &val);
if (error != ENOENT)
return (error == 0 ? SET_ERROR(EEXIST) : error);
/* Must not have children if receiving a ZVOL. */
error = zap_count(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
if (error != 0)
return (error);
if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
children > 0)
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
/*
* Check snapshot limit before receiving. We'll recheck at the
* end, but might as well abort before receiving if we're already over
* the limit.
*
* Note that we do not check the file system limit with
* dsl_dir_fscount_check because the temporary %clones don't count
* against that limit.
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
NULL, drba->drba_cred, drba->drba_proc);
if (error != 0)
return (error);
if (fromguid != 0) {
dsl_dataset_t *snap;
uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
/* Can't perform a raw receive on top of a non-raw receive */
if (!encrypted && raw)
return (SET_ERROR(EINVAL));
/* Encryption is incompatible with embedded data */
if (encrypted && embed)
return (SET_ERROR(EINVAL));
/* Find snapshot in this dir that matches fromguid. */
while (obj != 0) {
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
return (SET_ERROR(ENODEV));
if (snap->ds_dir != ds->ds_dir) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ENODEV));
}
if (dsl_dataset_phys(snap)->ds_guid == fromguid)
break;
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
}
if (obj == 0)
return (SET_ERROR(ENODEV));
if (drba->drba_cookie->drc_force) {
drba->drba_cookie->drc_fromsnapobj = obj;
} else {
/*
* If we are not forcing, there must be no
* changes since fromsnap. Raw sends have an
* additional constraint that requires that
* no "noop" snapshots exist between fromsnap
* and tosnap for the IVset checking code to
* work properly.
*/
if (dsl_dataset_modified_since_snap(ds, snap) ||
(raw &&
dsl_dataset_phys(ds)->ds_prev_snap_obj !=
snap->ds_object)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(ETXTBSY));
}
drba->drba_cookie->drc_fromsnapobj =
ds->ds_prev->ds_object;
}
if (dsl_dataset_feature_is_active(snap,
SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
snap)) {
dsl_dataset_rele(snap, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_check_large_blocks(snap, featureflags);
if (error != 0) {
dsl_dataset_rele(snap, FTAG);
return (error);
}
dsl_dataset_rele(snap, FTAG);
} else {
/* if full, then must be forced */
if (!drba->drba_cookie->drc_force)
return (SET_ERROR(EEXIST));
/*
* We don't support using zfs recv -F to blow away
* encrypted filesystems. This would require the
* dsl dir to point to the old encryption key and
* the new one at the same time during the receive.
*/
if ((!encrypted && raw) || encrypted)
return (SET_ERROR(EINVAL));
/*
* Perform the same encryption checks we would if
* we were creating a new dataset from scratch.
*/
if (!raw) {
boolean_t will_encrypt;
error = dmu_objset_create_crypt_check(
ds->ds_dir->dd_parent, drba->drba_dcp,
&will_encrypt);
if (error != 0)
return (error);
if (will_encrypt && embed)
return (SET_ERROR(EINVAL));
}
}
return (0);
}
/*
* Check that any feature flags used in the data stream we're receiving are
* supported by the pool we are receiving into.
*
* Note that some of the features we explicitly check here have additional
* (implicit) features they depend on, but those dependencies are enforced
* through the zfeature_register() calls declaring the features that we
* explicitly check.
*/
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
/*
* Check if there are any unsupported feature flags.
*/
if (!DMU_STREAM_SUPPORTED(featureflags)) {
return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
}
/* Verify pool version supports SA if SA_SPILL feature set */
if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
spa_version(spa) < SPA_VERSION_SA)
return (SET_ERROR(ENOTSUP));
/*
* LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
* and large_dnodes in the stream can only be used if those pool
* features are enabled because we don't attempt to decompress /
* un-embed / un-mooch / split up the blocks / dnodes during the
* receive process.
*/
if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (SET_ERROR(ENOTSUP));
/*
* Receiving redacted streams requires that redacted datasets are
* enabled.
*/
if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
return (SET_ERROR(ENOTSUP));
return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
uint64_t fromguid = drrb->drr_fromguid;
int flags = drrb->drr_flags;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
const char *tofs = drba->drba_cookie->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES ||
((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
return (SET_ERROR(EINVAL));
error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
if (error != 0)
return (error);
/* Resumable receives require extensible datasets */
if (drba->drba_cookie->drc_resumable &&
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
return (SET_ERROR(ENOTSUP));
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require the encryption feature */
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
return (SET_ERROR(ENOTSUP));
/* embedded data is incompatible with encryption and raw recv */
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (SET_ERROR(EINVAL));
/* raw receives require spill block allocation flag */
if (!(flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
- dsflags |= DS_HOLD_FLAG_DECRYPT;
+ /*
+ * We support unencrypted datasets below encrypted ones now,
+ * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
+ * with a dataset we may encrypt.
+ */
+ if (drba->drba_dcp != NULL &&
+ drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
+ dsflags |= DS_HOLD_FLAG_DECRYPT;
+ }
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* target fs already exists; recv into temp clone */
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
error = recv_begin_check_existing_impl(drba, ds, fromguid,
featureflags);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else if (error == ENOENT) {
/* target fs does not exist; must be a full backup or clone */
char buf[ZFS_MAX_DATASET_NAME_LEN];
objset_t *os;
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
*/
if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
drba->drba_origin))
return (SET_ERROR(ENOENT));
/*
* If we're receiving a full send as a clone, and it doesn't
* contain all the necessary free records and freeobject
* records, reject it.
*/
if (fromguid == 0 && drba->drba_origin != NULL &&
!(flags & DRR_FLAG_FREERECORDS))
return (SET_ERROR(EINVAL));
/* Open the parent of tofs */
ASSERT3U(strlen(tofs), <, sizeof (buf));
(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
error = dsl_dataset_hold(dp, buf, FTAG, &ds);
if (error != 0)
return (error);
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
drba->drba_origin == NULL) {
boolean_t will_encrypt;
/*
* Check that we aren't breaking any encryption rules
* and that we have all the parameters we need to
* create an encrypted dataset if necessary. If we are
* making an encrypted dataset the stream can't have
* embedded data.
*/
error = dmu_objset_create_crypt_check(ds->ds_dir,
drba->drba_dcp, &will_encrypt);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (will_encrypt &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
}
/*
* Check filesystem and snapshot limits before receiving. We'll
* recheck snapshot limits again at the end (we create the
* filesystems and increment those counts during begin_sync).
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_FILESYSTEM_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/* can't recv below anything but filesystems (eg. no ZVOLs) */
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
if (drba->drba_origin != NULL) {
dsl_dataset_t *origin;
error = dsl_dataset_hold_flags(dp, drba->drba_origin,
dsflags, FTAG, &origin);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (!origin->ds_is_snapshot) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
fromguid != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENODEV));
}
if (origin->ds_dir->dd_crypto_obj != 0 &&
(featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* If the origin is redacted we need to verify that this
* send stream can safely be received on top of the
* origin.
*/
if (dsl_dataset_feature_is_active(origin,
SPA_FEATURE_REDACTED_DATASETS)) {
if (!redact_check(drba, origin)) {
dsl_dataset_rele_flags(origin, dsflags,
FTAG);
dsl_dataset_rele_flags(ds, dsflags,
FTAG);
return (SET_ERROR(EINVAL));
}
}
error = recv_check_large_blocks(ds, featureflags);
if (error != 0) {
dsl_dataset_rele_flags(origin, dsflags, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(origin, dsflags, FTAG);
}
dsl_dataset_rele(ds, FTAG);
error = 0;
}
return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dmu_recv_cookie_t *drc = drba->drba_cookie;
struct drr_begin *drrb = drc->drc_drrb;
const char *tofs = drc->drc_tofs;
uint64_t featureflags = drc->drc_featureflags;
dsl_dataset_t *ds, *newds;
objset_t *os;
uint64_t dsobj;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t crflags = 0;
dsl_crypto_params_t dummy_dcp = { 0 };
dsl_crypto_params_t *dcp = drba->drba_dcp;
if (drrb->drr_flags & DRR_FLAG_CI_DATA)
crflags |= DS_FLAG_CI_DATASET;
if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
dsflags |= DS_HOLD_FLAG_DECRYPT;
/*
* Raw, non-incremental recvs always use a dummy dcp with
* the raw cmd set. Raw incremental recvs do not use a dcp
* since the encryption parameters are already set in stone.
*/
if (dcp == NULL && drrb->drr_fromguid == 0 &&
drba->drba_origin == NULL) {
ASSERT3P(dcp, ==, NULL);
dcp = &dummy_dcp;
if (featureflags & DMU_BACKUP_FEATURE_RAW)
dcp->cp_cmd = DCP_CMD_RAW_RECV;
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
/* create temporary clone */
dsl_dataset_t *snap = NULL;
if (drba->drba_cookie->drc_fromsnapobj != 0) {
VERIFY0(dsl_dataset_hold_obj(dp,
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
ASSERT3P(dcp, ==, NULL);
}
dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
snap, crflags, drba->drba_cred, dcp, tx);
if (drba->drba_cookie->drc_fromsnapobj != 0)
dsl_dataset_rele(snap, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
} else {
dsl_dir_t *dd;
const char *tail;
dsl_dataset_t *origin = NULL;
VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
if (drba->drba_origin != NULL) {
VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
FTAG, &origin));
ASSERT3P(dcp, ==, NULL);
}
/* Create new dataset. */
dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
origin, crflags, drba->drba_cred, dcp, tx);
if (origin != NULL)
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(dd, FTAG);
drc->drc_newfs = B_TRUE;
}
VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
&newds));
if (dsl_dataset_feature_is_active(newds,
SPA_FEATURE_REDACTED_DATASETS)) {
/*
* If the origin dataset is redacted, the child will be redacted
* when we create it. We clear the new dataset's
* redaction info; if it should be redacted, we'll fill
* in its information later.
*/
dsl_dataset_deactivate_feature(newds,
SPA_FEATURE_REDACTED_DATASETS, tx);
}
VERIFY0(dmu_objset_from_ds(newds, &os));
if (drc->drc_resumable) {
dsl_dataset_zapify(newds, tx);
if (drrb->drr_fromguid != 0) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
8, 1, &drrb->drr_fromguid, tx));
}
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
8, 1, &drrb->drr_toguid, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
uint64_t one = 1;
uint64_t zero = 0;
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
8, 1, &one, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
8, 1, &zero, tx));
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
8, 1, &zero, tx));
if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
8, 1, &one, tx));
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
8, 1, &one, tx));
}
uint64_t *redact_snaps;
uint_t numredactsnaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
&numredactsnaps) == 0) {
VERIFY0(zap_add(mos, dsobj,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
sizeof (*redact_snaps), numredactsnaps,
redact_snaps, tx));
}
}
/*
* Usually the os->os_encrypted value is tied to the presence of a
* DSL Crypto Key object in the dd. However, that will not be received
* until dmu_recv_stream(), so we set the value manually for now.
*/
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
os->os_encrypted = B_TRUE;
drba->drba_cookie->drc_raw = B_TRUE;
}
if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t *redact_snaps;
uint_t numredactsnaps;
VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
dsl_dataset_activate_redaction(newds, redact_snaps,
numredactsnaps, tx);
}
dmu_buf_will_dirty(newds->ds_dbuf, tx);
dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
/*
* If we actually created a non-clone, we need to create the objset
* in our new dataset. If this is a raw send we postpone this until
* dmu_recv_stream() so that we can allocate the metadnode with the
* properties from the DRR_BEGIN payload.
*/
rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
(featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
(void) dmu_objset_create_impl(dp->dp_spa,
newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
}
rrw_exit(&newds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = newds;
drba->drba_cookie->drc_os = os;
spa_history_log_internal_ds(newds, "receive", tx, " ");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dmu_recv_cookie_t *drc = drba->drba_cookie;
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drc->drc_drrb;
int error;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
dsl_dataset_t *ds;
const char *tofs = drc->drc_tofs;
/* already checked */
ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES)
return (SET_ERROR(EINVAL));
/*
* This is mostly a sanity check since we should have already done these
* checks during a previous attempt to receive the data.
*/
error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
dp->dp_spa);
if (error != 0)
return (error);
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s",
tofs, recv_clone_name);
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
/* raw receives require spill block allocation flag */
if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
/* %recv does not exist; continue in tofs */
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error != 0)
return (error);
}
/* check that ds is marked inconsistent */
if (!DS_IS_INCONSISTENT(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/* check that there is resuming data, and that the toguid matches */
if (!dsl_dataset_is_zapified(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
uint64_t val;
error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
if (error != 0 || drrb->drr_toguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Check if the receive is still running. If so, it will be owned.
* Note that nothing else can own the dataset (e.g. after the receive
* fails) because it will be marked inconsistent.
*/
if (dsl_dataset_has_owner(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EBUSY));
}
/* There should not be any snapshots of this fs yet. */
if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
/*
* Note: resume point will be checked when we process the first WRITE
* record.
*/
/* check that the origin matches */
val = 0;
(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
if (drrb->drr_fromguid != val) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
drc->drc_fromsnapobj = ds->ds_prev->ds_object;
/*
* If we're resuming, and the send is redacted, then the original send
* must have been redacted, and must have been redacted with respect to
* the same snapshots.
*/
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t num_ds_redact_snaps;
uint64_t *ds_redact_snaps;
uint_t num_stream_redact_snaps;
uint64_t *stream_redact_snaps;
if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
&num_stream_redact_snaps) != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
if (!dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
&ds_redact_snaps)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
for (int i = 0; i < num_ds_redact_snaps; i++) {
if (!redact_snaps_contains(ds_redact_snaps,
num_ds_redact_snaps, stream_redact_snaps[i])) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
}
}
error = recv_check_large_blocks(ds, drc->drc_featureflags);
if (error != 0) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (error);
}
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_begin_arg_t *drba = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
const char *tofs = drba->drba_cookie->drc_tofs;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
recv_clone_name);
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
drba->drba_cookie->drc_raw = B_TRUE;
} else {
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
!= 0) {
/* %recv does not exist; continue in tofs */
VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
&ds));
drba->drba_cookie->drc_newfs = B_TRUE;
}
ASSERT(DS_IS_INCONSISTENT(ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
drba->drba_cookie->drc_raw);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = ds;
VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
drba->drba_cookie->drc_should_save = B_TRUE;
spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}
/*
* NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
* succeeds; otherwise we will leak the holds on the datasets.
*/
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
boolean_t force, boolean_t resumable, nvlist_t *localprops,
nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc,
zfs_file_t *fp, offset_t *voffp)
{
dmu_recv_begin_arg_t drba = { 0 };
int err;
bzero(drc, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
drc->drc_tofs = tofs;
drc->drc_force = force;
drc->drc_resumable = resumable;
drc->drc_cred = CRED();
drc->drc_proc = curproc;
drc->drc_clone = (origin != NULL);
if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
drc->drc_byteswap = B_TRUE;
(void) fletcher_4_incremental_byteswap(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
byteswap_record(drr_begin);
} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
(void) fletcher_4_incremental_native(drr_begin,
sizeof (dmu_replay_record_t), &drc->drc_cksum);
} else {
return (SET_ERROR(EINVAL));
}
drc->drc_fp = fp;
drc->drc_voff = *voffp;
drc->drc_featureflags =
DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
void *payload = NULL;
if (payloadlen != 0)
payload = kmem_alloc(payloadlen, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, payloadlen,
payload);
if (err != 0) {
kmem_free(payload, payloadlen);
return (err);
}
if (payloadlen != 0) {
err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
KM_SLEEP);
kmem_free(payload, payloadlen);
if (err != 0) {
kmem_free(drc->drc_next_rrd,
sizeof (*drc->drc_next_rrd));
return (err);
}
}
if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
drc->drc_spill = B_TRUE;
drba.drba_origin = origin;
drba.drba_cookie = drc;
drba.drba_cred = CRED();
drba.drba_proc = curproc;
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = dsl_sync_task(tofs,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
* with "zfs recv -o". For these receives we create a dcp and
* pass it to the sync task. Creating the dcp will implicitly
* remove the encryption params from the localprops nvlist,
* which avoids errors when trying to set these normally
* read-only properties. Any other kind of receive that
* attempts to set these properties will fail as a result.
*/
if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW) == 0 &&
origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
localprops, hidden_args, &drba.drba_dcp);
}
if (err == 0) {
err = dsl_sync_task(tofs,
dmu_recv_begin_check, dmu_recv_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
dsl_crypto_params_free(drba.drba_dcp, !!err);
}
}
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
nvlist_free(drc->drc_begin_nvl);
}
return (err);
}
static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
int done = 0;
/*
* The code doesn't rely on this (lengths being multiples of 8). See
* comment in dump_bytes.
*/
ASSERT(len % 8 == 0 ||
(drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
while (done < len) {
ssize_t resid;
zfs_file_t *fp = drc->drc_fp;
int err = zfs_file_read(fp, (char *)buf + done,
len - done, &resid);
if (resid == len - done) {
/*
* Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
* that the receive was interrupted and can
* potentially be resumed.
*/
err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
}
drc->drc_voff += len - done - resid;
done = len - resid;
if (err != 0)
return (err);
}
drc->drc_bytes_read += len;
ASSERT3U(done, ==, len);
return (0);
}
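/*
 * Userland analogue (not compiled) of the loop above, using plain read(2)
 * instead of zfs_file_read(): keep reading until "len" bytes have arrived,
 * and treat a read that makes no progress as a truncated stream.  The
 * helper name is illustrative only.
 */
#if 0
static int
read_exactly(int fd, void *buf, int len)
{
        int done = 0;

        while (done < len) {
                ssize_t n = read(fd, (char *)buf + done, len - done);
                if (n < 0)
                        return (errno);         /* I/O error */
                if (n == 0)
                        return (SET_ERROR(ZFS_ERR_STREAM_TRUNCATED));
                done += n;
        }
        return (0);
}
#endif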
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
if (bonus_type == DMU_OT_SA) {
return (1);
} else {
return (1 +
((DN_OLD_MAX_BONUSLEN -
MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
}
}
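/*
 * Worked example (not compiled): assuming the legacy 512-byte dnode layout,
 * DN_OLD_MAX_BONUSLEN is 320 bytes and a block pointer is 128 bytes
 * (SPA_BLKPTRSHIFT == 7), so the bonus buffer trades space with block
 * pointers in 128-byte steps.
 */
#if 0
static void
deduce_nblkptr_examples(void)
{
        /* No bonus: the 320-byte tail holds 1 + (320 >> 7) == 3 blkptrs. */
        ASSERT3U(deduce_nblkptr(DMU_OT_NONE, 0), ==, 3);
        /* A maximal 320-byte bonus leaves room for only the mandatory blkptr. */
        ASSERT3U(deduce_nblkptr(DMU_OT_PACKED_NVLIST, 320), ==, 1);
        /* SA bonus buffers always imply a single blkptr, regardless of size. */
        ASSERT3U(deduce_nblkptr(DMU_OT_SA, 64), ==, 1);
}
#endif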
static void
save_resume_state(struct receive_writer_arg *rwa,
uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (!rwa->resumable)
return;
/*
* We use ds_resume_bytes[] != 0 to indicate that we need to
* update this on disk, so it must not be 0.
*/
ASSERT(rwa->bytes_read != 0);
/*
* We only resume from write records, which have a valid
* (non-meta-dnode) object number.
*/
ASSERT(object != 0);
/*
* For resuming to work correctly, we must receive records in order,
* sorted by object,offset. This is checked by the callers, but
* assert it here for good measure.
*/
ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
ASSERT3U(rwa->bytes_read, >=,
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
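/*
 * Fragment-style sketch (not compiled) of the indexing used above: the
 * resume cursor lives in per-txg slots selected with txg & TXG_MASK, so
 * state belonging to a txg that has not synced yet is never clobbered by a
 * record assigned to a later txg.  The txg value below is made up.
 */
#if 0
        dsl_dataset_t *ds = rwa->os->os_dsl_dataset;
        uint64_t txg = dmu_tx_get_txg(tx);      /* e.g. txg 1000 */
        int txgoff = txg & TXG_MASK;            /* TXG_MASK == 3, so slot 0 */
        ds->ds_resume_object[txgoff] = object;
        ds->ds_resume_offset[txgoff] = offset;
        ds->ds_resume_bytes[txgoff] = rwa->bytes_read;
#endif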
static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
const void *new_bonus, boolean_t *samegenp)
{
zfs_file_info_t zoi;
int err;
dmu_buf_t *old_bonus_dbuf;
err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
if (err != 0)
return (err);
err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
&zoi);
dmu_buf_rele(old_bonus_dbuf, FTAG);
if (err != 0)
return (err);
uint64_t old_gen = zoi.zfi_generation;
err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
if (err != 0)
return (err);
uint64_t new_gen = zoi.zfi_generation;
*samegenp = (old_gen == new_gen);
return (0);
}
static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
const struct drr_object *drro, const dmu_object_info_t *doi,
const void *bonus_data,
uint64_t *object_to_hold, uint32_t *new_blksz)
{
uint32_t indblksz = drro->drr_indblkshift ?
1ULL << drro->drr_indblkshift : 0;
int nblkptr = deduce_nblkptr(drro->drr_bonustype,
drro->drr_bonuslen);
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
boolean_t do_free_range = B_FALSE;
int err;
*object_to_hold = drro->drr_object;
/* nblkptr should be bounded by the bonus size and type */
if (rwa->raw && nblkptr != drro->drr_nblkptr)
return (SET_ERROR(EINVAL));
/*
* After the previous send stream, the sending system may
* have freed this object, and then happened to re-allocate
* this object number in a later txg. In this case, we are
* receiving a different logical file, and the block size may
* appear to be different, i.e. we may have a different
* block size for this object than what the send stream says.
* In this case we need to remove the object's contents,
* so that its structure can be changed and then its contents
* entirely replaced by subsequent WRITE records.
*
* If this is a -L (--large-block) incremental stream, and
* the previous stream was not -L, the block size may appear
* to increase, i.e. we may have a smaller block size for
* this object than what the send stream says. In this case
* we need to keep the object's contents and block size
* intact, so that we don't lose parts of the object's
* contents that are not changed by this incremental send
* stream.
*
* We can distinguish between the two above cases by using
* the ZPL's generation number (see
* receive_object_is_same_generation()). However, we only
* want to rely on the generation number when absolutely
* necessary, because with raw receives, the generation is
* encrypted. We also want to minimize dependence on the
* ZPL, so that other types of datasets can also be received
* (e.g. ZVOLs, although note that ZVOLs currently do not
* reallocate their objects or change their structure).
* Therefore, we check a number of different cases where we
* know it is safe to discard the object's contents, before
* using the ZPL's generation number to make the above
* distinction.
*/
if (drro->drr_blksz != doi->doi_data_block_size) {
if (rwa->raw) {
/*
* RAW streams always have large blocks, so
* we are sure that the data is not needed
* due to changing --large-block to be on.
* Which is fortunate since the bonus buffer
* (which contains the ZPL generation) is
* encrypted, and the key might not be
* loaded.
*/
do_free_range = B_TRUE;
} else if (rwa->full) {
/*
* This is a full send stream, so it always
* replaces what we have. Even if the
* generation numbers happen to match, this
* can not actually be the same logical file.
* This is relevant when receiving a full
* send as a clone.
*/
do_free_range = B_TRUE;
} else if (drro->drr_type !=
DMU_OT_PLAIN_FILE_CONTENTS ||
doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
/*
* PLAIN_FILE_CONTENTS are the only type of
* objects that have ever been stored with
* large blocks, so we don't need the special
* logic below. ZAP blocks can shrink (when
* there's only one block), so we don't want
* to hit the error below about block size
* only increasing.
*/
do_free_range = B_TRUE;
} else if (doi->doi_max_offset <=
doi->doi_data_block_size) {
/*
* There is only one block. We can free it,
* because its contents will be replaced by a
* WRITE record. This can not be the no-L ->
* -L case, because the no-L case would have
* resulted in multiple blocks. If we
* supported -L -> no-L, it would not be safe
* to free the file's contents. Fortunately,
* that is not allowed (see
* recv_check_large_blocks()).
*/
do_free_range = B_TRUE;
} else {
boolean_t is_same_gen;
err = receive_object_is_same_generation(rwa->os,
drro->drr_object, doi->doi_bonus_type,
drro->drr_bonustype, bonus_data, &is_same_gen);
if (err != 0)
return (SET_ERROR(EINVAL));
if (is_same_gen) {
/*
* This is the same logical file, and
* the block size must be increasing.
* It could only decrease if
* --large-block was changed to be
* off, which is checked in
* recv_check_large_blocks().
*/
if (drro->drr_blksz <=
doi->doi_data_block_size)
return (SET_ERROR(EINVAL));
/*
* We keep the existing blocksize and
* contents.
*/
*new_blksz =
doi->doi_data_block_size;
} else {
do_free_range = B_TRUE;
}
}
}
/* nblkptr can only decrease if the object was reallocated */
if (nblkptr < doi->doi_nblkptr)
do_free_range = B_TRUE;
/* number of slots can only change on reallocation */
if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
do_free_range = B_TRUE;
/*
* For raw sends we also check a few other fields to
* ensure we are preserving the objset structure exactly
* as it was on the send side:
* - A changed indirect block size
* - A smaller nlevels
*/
if (rwa->raw) {
if (indblksz != doi->doi_metadata_block_size)
do_free_range = B_TRUE;
if (drro->drr_nlevels < doi->doi_indirection)
do_free_range = B_TRUE;
}
if (do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
0, DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
/*
* The dmu does not currently support decreasing nlevels
* or changing the number of dnode slots on an object. For
* non-raw sends, this does not matter and the new object
* can just use the previous one's nlevels. For raw sends,
* however, the structure of the received dnode (including
* nlevels and dnode slots) must match that of the send
* side. Therefore, instead of using dmu_object_reclaim(),
* we must free the object completely and call
* dmu_object_claim_dnsize() instead.
*/
if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
err = dmu_free_long_object(rwa->os, drro->drr_object);
if (err != 0)
return (SET_ERROR(EINVAL));
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
*object_to_hold = DMU_NEW_OBJECT;
}
/*
* For raw receives, free everything beyond the new incoming
* maxblkid. Normally this would be done with a DRR_FREE
* record that would come after this DRR_OBJECT record is
* processed. However, for raw receives we manually set the
* maxblkid from the drr_maxblkid and so we must first free
* everything above that blkid to ensure the DMU is always
* consistent with itself. We will never free the first block
* of the object here because a maxblkid of 0 could indicate
* an object with a single block or one with no blocks. This
* free may be skipped when dmu_free_long_range() was called
* above since it covers the entire object's contents.
*/
if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
err = dmu_free_long_range(rwa->os, drro->drr_object,
(drro->drr_maxblkid + 1) * doi->doi_data_block_size,
DMU_OBJECT_END);
if (err != 0)
return (SET_ERROR(EINVAL));
}
return (0);
}
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
void *data)
{
dmu_object_info_t doi;
dmu_tx_t *tx;
int err;
uint32_t new_blksz = drro->drr_blksz;
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
if (drro->drr_type == DMU_OT_NONE ||
!DMU_OT_IS_VALID(drro->drr_type) ||
!DMU_OT_IS_VALID(drro->drr_bonustype) ||
drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
drro->drr_blksz < SPA_MINBLOCKSIZE ||
drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
drro->drr_bonuslen >
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
dn_slots >
(spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
return (SET_ERROR(EINVAL));
}
if (rwa->raw) {
/*
* We should have received a DRR_OBJECT_RANGE record
* containing this block and stored it in rwa.
*/
if (drro->drr_object < rwa->or_firstobj ||
drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
drro->drr_raw_bonuslen < drro->drr_bonuslen ||
drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
drro->drr_nlevels > DN_MAX_LEVELS ||
drro->drr_nblkptr > DN_MAX_NBLKPTR ||
DN_SLOTS_TO_BONUSLEN(dn_slots) <
drro->drr_raw_bonuslen)
return (SET_ERROR(EINVAL));
} else {
/*
* The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
* record indicates this by setting DRR_FLAG_SPILL_BLOCK.
*/
if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
(!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
return (SET_ERROR(EINVAL));
}
if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
return (SET_ERROR(EINVAL));
}
}
err = dmu_object_info(rwa->os, drro->drr_object, &doi);
if (err != 0 && err != ENOENT && err != EEXIST)
return (SET_ERROR(EINVAL));
if (drro->drr_object > rwa->max_object)
rwa->max_object = drro->drr_object;
/*
* If we are losing blkptrs or changing the block size this must
* be a new file instance. We must clear out the previous file
* contents before we can change this type of metadata in the dnode.
* Raw receives will also check that the indirect structure of the
* dnode hasn't changed.
*/
uint64_t object_to_hold;
if (err == 0) {
err = receive_handle_existing_object(rwa, drro, &doi, data,
&object_to_hold, &new_blksz);
} else if (err == EEXIST) {
/*
* The object requested is currently an interior slot of a
* multi-slot dnode. This will be resolved when the next txg
* is synced out, since the send stream will have told us
* to free this slot when we freed the associated dnode
* earlier in the stream.
*/
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
return (SET_ERROR(EINVAL));
/* object was freed and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
} else {
/* object is free and we are about to allocate a new one */
object_to_hold = DMU_NEW_OBJECT;
}
/*
* If this is a multi-slot dnode there is a chance that this
* object will expand into a slot that is already used by
* another object from the previous snapshot. We must free
* these objects before we attempt to allocate the new dnode.
*/
if (dn_slots > 1) {
boolean_t need_sync = B_FALSE;
for (uint64_t slot = drro->drr_object + 1;
slot < drro->drr_object + dn_slots;
slot++) {
dmu_object_info_t slot_doi;
err = dmu_object_info(rwa->os, slot, &slot_doi);
if (err == ENOENT || err == EEXIST)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, slot);
if (err != 0)
return (err);
need_sync = B_TRUE;
}
if (need_sync)
txg_wait_synced(dmu_objset_pool(rwa->os), 0);
}
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_bonus(tx, object_to_hold);
dmu_tx_hold_write(tx, object_to_hold, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
if (object_to_hold == DMU_NEW_OBJECT) {
/* Currently free, wants to be allocated */
err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, tx);
} else if (drro->drr_type != doi.doi_type ||
new_blksz != doi.doi_data_block_size ||
drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size) {
/* Currently allocated, but with different properties */
err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, rwa->spill ?
DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
/*
* Currently allocated, the existing version of this object
* may reference a spill block that is no longer allocated
* at the source and needs to be freed.
*/
err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
}
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
if (rwa->or_crypt_params_present) {
/*
* Set the crypt params for the buffer associated with this
* range of dnodes. This causes the blkptr_t to have the
* same crypt params (byteorder, salt, iv, mac) as on the
* sending side.
*
* Since we are committing this tx now, it is possible for
* the dnode block to end up on-disk with the incorrect MAC,
* if subsequent objects in this block are received in a
* different txg. However, since the dataset is marked as
* inconsistent, no code paths will do a non-raw read (or
* decrypt the block / verify the MAC). The receive code and
* scrub code can safely do raw reads and verify the
* checksum. They don't need to verify the MAC.
*/
dmu_buf_t *db = NULL;
uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
if (err != 0) {
dmu_tx_commit(tx);
return (SET_ERROR(EINVAL));
}
dmu_buf_set_crypt_params(db, rwa->or_byteorder,
rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
dmu_buf_rele(db, FTAG);
rwa->or_crypt_params_present = B_FALSE;
}
dmu_object_set_checksum(rwa->os, drro->drr_object,
drro->drr_checksumtype, tx);
dmu_object_set_compress(rwa->os, drro->drr_object,
drro->drr_compress, tx);
/* handle more restrictive dnode structuring for raw recvs */
if (rwa->raw) {
/*
* Set the indirect block size, block shift, nlevels.
* This will not fail because we ensured all of the
* blocks were freed earlier if this is a new object.
* For non-new objects block size and indirect block
* shift cannot change and nlevels can only increase.
*/
ASSERT3U(new_blksz, ==, drro->drr_blksz);
VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
drro->drr_blksz, drro->drr_indblkshift, tx));
VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
drro->drr_nlevels, tx));
/*
* Set the maxblkid. This will always succeed because
* we freed all blocks beyond the new maxblkid above.
*/
VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
drro->drr_maxblkid, tx));
}
if (data != NULL) {
dmu_buf_t *db;
dnode_t *dn;
uint32_t flags = DMU_READ_NO_PREFETCH;
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;
VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
* DRR_OBJECT_RANGE record.
*/
if (rwa->byteswap && !rwa->raw) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drro->drr_bonustype);
dmu_ot_byteswap[byteswap].ob_func(db->db_data,
DRR_OBJECT_PAYLOAD_SIZE(drro));
}
dmu_buf_rele(db, FTAG);
dnode_rele(dn, FTAG);
}
dmu_tx_commit(tx);
return (0);
}
/* ARGSUSED */
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
struct drr_freeobjects *drrfo)
{
uint64_t obj;
int next_err = 0;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
obj < DN_MAX_OBJECT && next_err == 0;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
dmu_object_info_t doi;
int err;
err = dmu_object_info(rwa->os, obj, &doi);
if (err == ENOENT)
continue;
else if (err != 0)
return (err);
err = dmu_free_long_object(rwa->os, obj);
if (err != 0)
return (err);
}
if (next_err != ESRCH)
return (next_err);
return (0);
}
/*
* Note: if this fails, the caller will clean up any records left on the
* rwa->write_batch list.
*/
static int
flush_write_batch_impl(struct receive_writer_arg *rwa)
{
dnode_t *dn;
int err;
if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
return (SET_ERROR(EINVAL));
struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
last_drrw->drr_offset - first_drrw->drr_offset +
last_drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
dnode_rele(dn, FTAG);
return (err);
}
struct receive_record_arg *rrd;
while ((rrd = list_head(&rwa->write_batch)) != NULL) {
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
abd_t *abd = rrd->abd;
ASSERT3U(drrw->drr_object, ==, rwa->last_object);
if (drrw->drr_logical_size != dn->dn_datablksz) {
/*
* The WRITE record is larger than the object's block
* size. We must be receiving an incremental
* large-block stream into a dataset that previously did
* a non-large-block receive. Lightweight writes must
* be exactly one block, so we need to decompress the
* data (if compressed) and do a normal dmu_write().
*/
ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
if (DRR_WRITE_COMPRESSED(drrw)) {
abd_t *decomp_abd =
abd_alloc_linear(drrw->drr_logical_size,
B_FALSE);
err = zio_decompress_data(
drrw->drr_compressiontype,
abd, abd_to_buf(decomp_abd),
abd_get_size(abd),
abd_get_size(decomp_abd), NULL);
if (err == 0) {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(decomp_abd), tx);
}
abd_free(decomp_abd);
} else {
dmu_write_by_dnode(dn,
drrw->drr_offset,
drrw->drr_logical_size,
abd_to_buf(abd), tx);
}
if (err == 0)
abd_free(abd);
} else {
zio_prop_t zp;
dmu_write_policy(rwa->os, dn, 0, 0, &zp);
enum zio_flag zio_flags = 0;
if (rwa->raw) {
zp.zp_encrypt = B_TRUE;
zp.zp_compress = drrw->drr_compressiontype;
zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
rwa->byteswap;
bcopy(drrw->drr_salt, zp.zp_salt,
ZIO_DATA_SALT_LEN);
bcopy(drrw->drr_iv, zp.zp_iv,
ZIO_DATA_IV_LEN);
bcopy(drrw->drr_mac, zp.zp_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
zp.zp_nopwrite = B_FALSE;
zp.zp_copies = MIN(zp.zp_copies,
SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (DRR_WRITE_COMPRESSED(drrw)) {
ASSERT3U(drrw->drr_compressed_size, >, 0);
ASSERT3U(drrw->drr_logical_size, >=,
drrw->drr_compressed_size);
zp.zp_compress = drrw->drr_compressiontype;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
} else if (rwa->byteswap) {
/*
* Note: compressed blocks never need to be
* byteswapped, because WRITE records for
* metadata blocks are never compressed. The
* exception is raw streams, which are written
* in the original byteorder, and the byteorder
* bit is preserved in the BP by setting
* zp_byteorder above.
*/
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrw->drr_type);
dmu_ot_byteswap[byteswap].ob_func(
abd_to_buf(abd),
DRR_WRITE_PAYLOAD_SIZE(drrw));
}
/*
* Since this data can't be read until the receive
* completes, we can do a "lightweight" write for
* improved performance.
*/
err = dmu_lightweight_write_by_dnode(dn,
drrw->drr_offset, abd, &zp, zio_flags, tx);
}
if (err != 0) {
/*
* This rrd is left on the list, so the caller will
* free it (and the abd).
*/
break;
}
/*
* Note: If the receive fails, we want the resume stream to
* start with the same record that we last successfully
* received (as opposed to the next record), so that we can
* verify that we are resuming from the correct location.
*/
save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
list_remove(&rwa->write_batch, rrd);
kmem_free(rrd, sizeof (*rrd));
}
dmu_tx_commit(tx);
dnode_rele(dn, FTAG);
return (err);
}
noinline static int
flush_write_batch(struct receive_writer_arg *rwa)
{
if (list_is_empty(&rwa->write_batch))
return (0);
int err = rwa->err;
if (err == 0)
err = flush_write_batch_impl(rwa);
if (err != 0) {
struct receive_record_arg *rrd;
while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
abd_free(rrd->abd);
kmem_free(rrd, sizeof (*rrd));
}
}
ASSERT(list_is_empty(&rwa->write_batch));
return (err);
}
noinline static int
receive_process_write_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err = 0;
ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
return (SET_ERROR(EINVAL));
/*
* For resuming to work, records must be in increasing order
* by (object, offset).
*/
if (drrw->drr_object < rwa->last_object ||
(drrw->drr_object == rwa->last_object &&
drrw->drr_offset < rwa->last_offset)) {
return (SET_ERROR(EINVAL));
}
struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
uint64_t batch_size =
MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
if (first_rrd != NULL &&
(drrw->drr_object != first_drrw->drr_object ||
drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
err = flush_write_batch(rwa);
if (err != 0)
return (err);
}
rwa->last_object = drrw->drr_object;
rwa->last_offset = drrw->drr_offset;
if (rwa->last_object > rwa->max_object)
rwa->max_object = rwa->last_object;
list_insert_tail(&rwa->write_batch, rrd);
/*
* Return EAGAIN to indicate that we will use this rrd again,
* so the caller should not free it.
*/
return (EAGAIN);
}
static int
receive_write_embedded(struct receive_writer_arg *rwa,
struct drr_write_embedded *drrwe, void *data)
{
dmu_tx_t *tx;
int err;
if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
return (SET_ERROR(EINVAL));
if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
return (SET_ERROR(EINVAL));
if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
return (SET_ERROR(EINVAL));
if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
return (SET_ERROR(EINVAL));
if (rwa->raw)
return (SET_ERROR(EINVAL));
if (drrwe->drr_object > rwa->max_object)
rwa->max_object = drrwe->drr_object;
tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write(tx, drrwe->drr_object,
drrwe->drr_offset, drrwe->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
dmu_write_embedded(rwa->os, drrwe->drr_object,
drrwe->drr_offset, data, drrwe->drr_etype,
drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
/* See comment in restore_write. */
save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
dmu_tx_commit(tx);
return (0);
}
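/*
 * Apply a DRR_SPILL record: hold the object's bonus and spill dbufs, resize
 * the spill dbuf if the incoming length differs, and assign a loaned arc buf
 * (raw for raw receives) containing the record's payload.
 */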
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
abd_t *abd)
{
dmu_buf_t *db, *db_spill;
int err;
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
return (SET_ERROR(EINVAL));
/*
* This is an unmodified spill block which was added to the stream
* to resolve an issue with incorrectly removing spill blocks. It
* should be ignored by current versions of the code which support
* the DRR_FLAG_SPILL_BLOCK flag.
*/
if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
abd_free(abd);
return (0);
}
if (rwa->raw) {
if (!DMU_OT_IS_VALID(drrs->drr_type) ||
drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
drrs->drr_compressed_size == 0)
return (SET_ERROR(EINVAL));
}
if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrs->drr_object > rwa->max_object)
rwa->max_object = drrs->drr_object;
VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
&db_spill)) != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_spill(tx, db->db_object);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_abort(tx);
return (err);
}
/*
* Spill blocks may both grow and shrink. When a change in size
* occurs any existing dbuf must be updated to match the logical
* size of the provided arc_buf_t.
*/
if (db_spill->db_size != drrs->drr_length) {
dmu_buf_will_fill(db_spill, tx);
VERIFY0(dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
}
arc_buf_t *abuf;
if (rwa->raw) {
boolean_t byteorder = ZFS_HOST_BYTEORDER ^
!!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
rwa->byteswap;
abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
drrs->drr_object, byteorder, drrs->drr_salt,
drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
drrs->drr_compressed_size, drrs->drr_length,
drrs->drr_compressiontype, 0);
} else {
abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
DMU_OT_IS_METADATA(drrs->drr_type),
drrs->drr_length);
if (rwa->byteswap) {
dmu_object_byteswap_t byteswap =
DMU_OT_BYTESWAP(drrs->drr_type);
dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
DRR_SPILL_PAYLOAD_SIZE(drrs));
}
}
bcopy(abd_to_buf(abd), abuf->b_data, DRR_SPILL_PAYLOAD_SIZE(drrs));
abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_commit(tx);
return (0);
}
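/*
 * Apply a DRR_FREE record by punching a hole in the object with
 * dmu_free_long_range().
 */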
/* ARGSUSED */
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
int err;
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
return (SET_ERROR(EINVAL));
if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
return (SET_ERROR(EINVAL));
if (drrf->drr_object > rwa->max_object)
rwa->max_object = drrf->drr_object;
err = dmu_free_long_range(rwa->os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
return (err);
}
static int
receive_object_range(struct receive_writer_arg *rwa,
struct drr_object_range *drror)
{
/*
* By default, we assume this block is in our native format
* (ZFS_HOST_BYTEORDER). We then take into account whether
* the send stream is byteswapped (rwa->byteswap). Finally,
* we need to byteswap again if this particular block was
* in non-native format on the send side.
*/
boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
!!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
/*
* Since dnode block sizes are constant, we should not need to worry
* about making sure that the dnode block size is the same on the
* sending and receiving sides for the time being. For non-raw sends,
* this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
* record at all). Raw sends require this record type because the
* encryption parameters are used to protect an entire block of bonus
* buffers. If the size of dnode blocks ever becomes variable,
* handling will need to be added to ensure that dnode block sizes
* match on the sending and receiving side.
*/
if (drror->drr_numslots != DNODES_PER_BLOCK ||
P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
!rwa->raw)
return (SET_ERROR(EINVAL));
if (drror->drr_firstobj > rwa->max_object)
rwa->max_object = drror->drr_firstobj;
/*
* The DRR_OBJECT_RANGE handling must be deferred to receive_object()
* so that the block of dnodes is not written out when it's empty,
* and converted to a HOLE BP.
*/
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
return (0);
}
/*
* Until we have the ability to redact large ranges of data efficiently, we
* process these records as frees.
*/
/* ARGSUSED */
noinline static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
struct drr_free drrf = {0};
drrf.drr_length = drrr->drr_length;
drrf.drr_object = drrr->drr_object;
drrf.drr_offset = drrr->drr_offset;
drrf.drr_toguid = drrr->drr_toguid;
return (receive_free(rwa, &drrf));
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
dsl_dataset_t *ds = drc->drc_ds;
ds_hold_flags_t dsflags;
dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
/*
* Wait for the txg sync before cleaning up the receive. For
* resumable receives, this ensures that our resume state has
* been written out to disk. For raw receives, this ensures
* that the user accounting code will not attempt to do anything
* after we stopped receiving the dataset.
*/
txg_wait_synced(ds->ds_dir->dd_pool, 0);
ds->ds_objset->os_raw_receive = B_FALSE;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
if (drc->drc_resumable && drc->drc_should_save &&
!BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
} else {
char name[ZFS_MAX_DATASET_NAME_LEN];
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_name(ds, name);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
(void) dsl_destroy_head(name);
}
}
static void
receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
{
if (drc->drc_byteswap) {
(void) fletcher_4_incremental_byteswap(buf, len,
&drc->drc_cksum);
} else {
(void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
}
}
/*
* Read the payload into a buffer of size len, and update the current record's
* payload field.
* Allocate drc->drc_next_rrd and read the next record's header into
* drc->drc_next_rrd->header.
* Verify checksum of payload and next record.
*/
static int
receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
{
int err;
if (len != 0) {
ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
err = receive_read(drc, len, buf);
if (err != 0)
return (err);
receive_cksum(drc, len, buf);
/* note: rrd is NULL when reading the begin record's payload */
if (drc->drc_rrd != NULL) {
drc->drc_rrd->payload = buf;
drc->drc_rrd->payload_size = len;
drc->drc_rrd->bytes_read = drc->drc_bytes_read;
}
} else {
ASSERT3P(buf, ==, NULL);
}
drc->drc_prev_cksum = drc->drc_cksum;
drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
&drc->drc_next_rrd->header);
drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
if (err != 0) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (err);
}
if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(EINVAL));
}
/*
* Note: checksum is of everything up to but not including the
* checksum itself.
*/
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
receive_cksum(drc,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&drc->drc_next_rrd->header);
zio_cksum_t cksum_orig =
drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
zio_cksum_t *cksump =
&drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
if (drc->drc_byteswap)
byteswap_record(&drc->drc_next_rrd->header);
if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
!ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
drc->drc_next_rrd = NULL;
return (SET_ERROR(ECKSUM));
}
receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
return (0);
}
/*
* Issue the prefetch reads for any necessary indirect blocks.
*
* We use the object ignore list to tell us whether or not to issue prefetches
* for a given object. We do this for both correctness (in case the blocksize
* of an object has changed) and performance (if the object doesn't exist, don't
* needlessly try to issue prefetches). We also trim the list as we go through
* the stream to prevent it from growing to an unbounded size.
*
* The object numbers within will always be in sorted order, and any write
* records we see will also be in sorted order, but they're not sorted with
* respect to each other (i.e. we can get several object records before
* receiving each object's write records). As a result, once we've reached a
* given object number, we can safely remove any reference to lower object
* numbers in the ignore list. In practice, we receive up to 32 object records
* before receiving write records, so the list can have up to 32 nodes in it.
*/
/* ARGSUSED */
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
uint64_t length)
{
if (!objlist_exists(drc->drc_ignore_objlist, object)) {
dmu_prefetch(drc->drc_os, object, 1, offset, length,
ZIO_PRIORITY_SYNC_READ);
}
}
/*
* Read records off the stream, issuing any necessary prefetches.
*/
static int
receive_read_record(dmu_recv_cookie_t *drc)
{
int err;
switch (drc->drc_rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro =
&drc->drc_rrd->header.drr_u.drr_object;
uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
void *buf = NULL;
dmu_object_info_t doi;
if (size != 0)
buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
/*
* See receive_read_prefetch for an explanation why we're
* storing this object in the ignore_obj_list.
*/
if (err == ENOENT || err == EEXIST ||
(err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
objlist_insert(drc->drc_ignore_objlist,
drro->drr_object);
err = 0;
}
return (err);
}
case DRR_FREEOBJECTS:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_WRITE:
{
struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0) {
abd_free(abd);
return (err);
}
drc->drc_rrd->abd = abd;
receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
drrw->drr_logical_size);
return (err);
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&drc->drc_rrd->header.drr_u.drr_write_embedded;
uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
void *buf = kmem_zalloc(size, KM_SLEEP);
err = receive_read_payload_and_next_header(drc, size, buf);
if (err != 0) {
kmem_free(buf, size);
return (err);
}
receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
drrwe->drr_length);
return (err);
}
case DRR_FREE:
case DRR_REDACT:
{
/*
* It might be beneficial to prefetch indirect blocks here, but
* we don't really have the data to decide for sure.
*/
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
case DRR_END:
{
struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
drre->drr_checksum))
return (SET_ERROR(ECKSUM));
return (0);
}
case DRR_SPILL:
{
struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
abd_t *abd = abd_alloc_linear(size, B_FALSE);
err = receive_read_payload_and_next_header(drc, size,
abd_to_buf(abd));
if (err != 0)
abd_free(abd);
else
drc->drc_rrd->abd = abd;
return (err);
}
case DRR_OBJECT_RANGE:
{
err = receive_read_payload_and_next_header(drc, 0, NULL);
return (err);
}
default:
return (SET_ERROR(EINVAL));
}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
dprintf("drr_type = OBJECT obj = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
"compress = %u dn_slots = %u err = %d\n",
(u_longlong_t)drro->drr_object, drro->drr_type,
drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
drro->drr_checksumtype, drro->drr_compress,
drro->drr_dn_slots, err);
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
dprintf("drr_type = FREEOBJECTS firstobj = %llu "
"numobjs = %llu err = %d\n",
(u_longlong_t)drrfo->drr_firstobj,
(u_longlong_t)drrfo->drr_numobjs, err);
break;
}
case DRR_WRITE:
{
struct drr_write *drrw = &rrd->header.drr_u.drr_write;
dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
"lsize = %llu cksumtype = %u flags = %u "
"compress = %u psize = %llu err = %d\n",
(u_longlong_t)drrw->drr_object, drrw->drr_type,
(u_longlong_t)drrw->drr_offset,
(u_longlong_t)drrw->drr_logical_size,
drrw->drr_checksumtype, drrw->drr_flags,
drrw->drr_compressiontype,
(u_longlong_t)drrw->drr_compressed_size, err);
break;
}
case DRR_WRITE_BYREF:
{
struct drr_write_byref *drrwbr =
&rrd->header.drr_u.drr_write_byref;
dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
"length = %llu toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu cksumtype = %u "
"flags = %u err = %d\n",
(u_longlong_t)drrwbr->drr_object,
(u_longlong_t)drrwbr->drr_offset,
(u_longlong_t)drrwbr->drr_length,
(u_longlong_t)drrwbr->drr_toguid,
(u_longlong_t)drrwbr->drr_refguid,
(u_longlong_t)drrwbr->drr_refobject,
(u_longlong_t)drrwbr->drr_refoffset,
drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
"length = %llu compress = %u etype = %u lsize = %u "
"psize = %u err = %d\n",
(u_longlong_t)drrwe->drr_object,
(u_longlong_t)drrwe->drr_offset,
(u_longlong_t)drrwe->drr_length,
drrwe->drr_compression, drrwe->drr_etype,
drrwe->drr_lsize, drrwe->drr_psize, err);
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
dprintf("drr_type = FREE obj = %llu offset = %llu "
"length = %lld err = %d\n",
(u_longlong_t)drrf->drr_object,
(u_longlong_t)drrf->drr_offset,
(longlong_t)drrf->drr_length,
err);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
dprintf("drr_type = SPILL obj = %llu length = %llu "
"err = %d\n", (u_longlong_t)drrs->drr_object,
(u_longlong_t)drrs->drr_length, err);
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u err = %d\n",
(u_longlong_t)drror->drr_firstobj,
(u_longlong_t)drror->drr_numslots,
drror->drr_flags, err);
break;
}
default:
return;
}
#endif
}
/*
* Commit the records to the pool.
*/
static int
receive_process_record(struct receive_writer_arg *rwa,
struct receive_record_arg *rrd)
{
int err;
/* Processing in order, therefore bytes_read should be increasing. */
ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
rwa->bytes_read = rrd->bytes_read;
if (rrd->header.drr_type != DRR_WRITE) {
err = flush_write_batch(rwa);
if (err != 0) {
if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
return (err);
}
}
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
struct drr_object *drro = &rrd->header.drr_u.drr_object;
err = receive_object(rwa, drro, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects *drrfo =
&rrd->header.drr_u.drr_freeobjects;
err = receive_freeobjects(rwa, drrfo);
break;
}
case DRR_WRITE:
{
err = receive_process_write_record(rwa, rrd);
if (err != EAGAIN) {
/*
* On success, receive_process_write_record() returns
* EAGAIN to indicate that we do not want to free
* the rrd or its abd.
*/
ASSERT(err != 0);
abd_free(rrd->abd);
rrd->abd = NULL;
}
break;
}
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
&rrd->header.drr_u.drr_write_embedded;
err = receive_write_embedded(rwa, drrwe, rrd->payload);
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
break;
}
case DRR_FREE:
{
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
err = receive_free(rwa, drrf);
break;
}
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
err = receive_spill(rwa, drrs, rrd->abd);
if (err != 0)
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
break;
}
case DRR_OBJECT_RANGE:
{
struct drr_object_range *drror =
&rrd->header.drr_u.drr_object_range;
err = receive_object_range(rwa, drror);
break;
}
case DRR_REDACT:
{
struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
err = receive_redact(rwa, drrr);
break;
}
default:
err = (SET_ERROR(EINVAL));
}
if (err != 0)
dprintf_drr(rrd, err);
return (err);
}
/*
* dmu_recv_stream's worker thread; pull records off the queue, and then call
* receive_process_record. When we're done, signal the main thread and exit.
*/
static void
receive_writer_thread(void *arg)
{
struct receive_writer_arg *rwa = arg;
struct receive_record_arg *rrd;
fstrans_cookie_t cookie = spl_fstrans_mark();
for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
rrd = bqueue_dequeue(&rwa->q)) {
/*
* If there's an error, the main thread will stop putting things
* on the queue, but we need to clear everything in it before we
* can exit.
*/
int err = 0;
if (rwa->err == 0) {
err = receive_process_record(rwa, rrd);
} else if (rrd->abd != NULL) {
abd_free(rrd->abd);
rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
/*
* EAGAIN indicates that this record has been saved (on
* rwa->write_batch), and will be used again, so we don't
* free it.
*/
if (err != EAGAIN) {
if (rwa->err == 0)
rwa->err = err;
kmem_free(rrd, sizeof (*rrd));
}
}
kmem_free(rrd, sizeof (*rrd));
int err = flush_write_batch(rwa);
if (rwa->err == 0)
rwa->err = err;
mutex_enter(&rwa->mutex);
rwa->done = B_TRUE;
cv_signal(&rwa->cv);
mutex_exit(&rwa->mutex);
spl_fstrans_unmark(cookie);
thread_exit();
}
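/*
 * Cross-check the resume_object/resume_offset from the stream's BEGIN payload
 * against the resume state recorded in the dataset's ZAP entries, so that a
 * resumed receive continues exactly where the previous attempt left off.
 */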
static int
resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
{
uint64_t val;
objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
uint64_t dsobj = dmu_objset_id(drc->drc_os);
uint64_t resume_obj, resume_off;
if (nvlist_lookup_uint64(begin_nvl,
"resume_object", &resume_obj) != 0 ||
nvlist_lookup_uint64(begin_nvl,
"resume_offset", &resume_off) != 0) {
return (SET_ERROR(EINVAL));
}
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
if (resume_obj != val)
return (SET_ERROR(EINVAL));
VERIFY0(zap_lookup(mos, dsobj,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
if (resume_off != val)
return (SET_ERROR(EINVAL));
return (0);
}
/*
* Read in the stream's records, one by one, and apply them to the pool. There
* are two threads involved; the thread that calls this function will spin up a
* worker thread, read the records off the stream one by one, and issue
* prefetches for any necessary indirect blocks. It will then push the records
* onto an internal blocking queue. The worker thread will pull the records off
* the queue, and actually write the data into the DMU. This way, the worker
* thread doesn't have to wait for reads to complete, since everything it needs
* (the indirect blocks) will be prefetched.
*
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
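/*
 * A rough sketch of how the two threads interact:
 *
 *   reader (this thread)                 receive_writer_thread()
 *   --------------------                 -----------------------
 *   receive_read_record()                rrd = bqueue_dequeue(&rwa->q)
 *     (reads payload, prefetches)        receive_process_record(rwa, rrd)
 *   bqueue_enqueue(&rwa->q, rrd)           (DRR_WRITEs are batched and
 *   ... repeat until DRR_END or error      flushed by flush_write_batch())
 *   enqueue the eos marker               on the eos marker: flush, set
 *   cv_wait_sig() until rwa->done          rwa->done, cv_signal(&rwa->cv)
 */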
int
dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
int err = 0;
struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
uint64_t bytes = 0;
(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
sizeof (bytes), 1, &bytes);
drc->drc_bytes_read += bytes;
}
drc->drc_ignore_objlist = objlist_create();
/* these were verified in dmu_recv_begin */
ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
DMU_SUBSTREAM);
ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
ASSERT0(drc->drc_os->os_encrypted &&
(drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
/* handle DSL encryption key payload */
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
nvlist_t *keynvl = NULL;
ASSERT(drc->drc_os->os_encrypted);
ASSERT(drc->drc_raw);
err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
&keynvl);
if (err != 0)
goto out;
/*
* If this is a new dataset we set the key immediately.
* Otherwise we don't want to change the key until we
* are sure the rest of the receive succeeded so we stash
* the keynvl away until then.
*/
err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
drc->drc_ds->ds_object, drc->drc_fromsnapobj,
drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
if (err != 0)
goto out;
/* see comment in dmu_recv_end_sync() */
drc->drc_ivset_guid = 0;
(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
&drc->drc_ivset_guid);
if (!drc->drc_newfs)
drc->drc_keynvl = fnvlist_dup(keynvl);
}
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = resume_check(drc, drc->drc_begin_nvl);
if (err != 0)
goto out;
}
/*
* If we failed before this point we will clean up any new resume
* state that was created. Now that we've gotten past the initial
* checks we are ok to retain that resume state.
*/
drc->drc_should_save = B_TRUE;
(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
offsetof(struct receive_record_arg, node));
cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
rwa->os = drc->drc_os;
rwa->byteswap = drc->drc_byteswap;
rwa->resumable = drc->drc_resumable;
rwa->raw = drc->drc_raw;
rwa->spill = drc->drc_spill;
rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
rwa->os->os_raw_receive = drc->drc_raw;
list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
offsetof(struct receive_record_arg, node.bqn_node));
(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
TS_RUN, minclsyspri);
/*
* We're reading rwa->err without locks, which is safe since we are the
* only reader, and the worker thread is the only writer. It's ok if we
* miss a write for an iteration or two of the loop, since the writer
* thread will keep freeing records we send it until we send it an eos
* marker.
*
* We can leave this loop in 3 ways: First, if rwa->err is
* non-zero. In that case, the writer thread will free the rrd we just
* pushed. Second, if we're interrupted; in that case, either it's the
* first loop and drc->drc_rrd was never allocated, or it's later, and
* drc->drc_rrd has been handed off to the writer thread who will free
* it. Finally, if receive_read_record fails or we're at the end of the
* stream, then we free drc->drc_rrd and exit.
*/
while (rwa->err == 0) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
err = SET_ERROR(EINTR);
break;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = drc->drc_next_rrd;
drc->drc_next_rrd = NULL;
/* Allocates and loads header into drc->drc_next_rrd */
err = receive_read_record(drc);
if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
drc->drc_rrd = NULL;
break;
}
bqueue_enqueue(&rwa->q, drc->drc_rrd,
sizeof (struct receive_record_arg) +
drc->drc_rrd->payload_size);
drc->drc_rrd = NULL;
}
ASSERT3P(drc->drc_rrd, ==, NULL);
drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
drc->drc_rrd->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
mutex_enter(&rwa->mutex);
while (!rwa->done) {
/*
* We need to use cv_wait_sig() so that any process that may
* be sleeping here can still fork.
*/
(void) cv_wait_sig(&rwa->cv, &rwa->mutex);
}
mutex_exit(&rwa->mutex);
/*
* If we are receiving a full stream as a clone, all object IDs which
* are greater than the maximum ID referenced in the stream are
* by definition unused and must be freed.
*/
if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
uint64_t obj = rwa->max_object + 1;
int free_err = 0;
int next_err = 0;
while (next_err == 0) {
free_err = dmu_free_long_object(rwa->os, obj);
if (free_err != 0 && free_err != ENOENT)
break;
next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
}
if (err == 0) {
if (free_err != 0 && free_err != ENOENT)
err = free_err;
else if (next_err != ESRCH)
err = next_err;
}
}
cv_destroy(&rwa->cv);
mutex_destroy(&rwa->mutex);
bqueue_destroy(&rwa->q);
list_destroy(&rwa->write_batch);
if (err == 0)
err = rwa->err;
out:
/*
* If we hit an error before we started the receive_writer_thread
* we need to clean up the next_rrd we create by processing the
* DRR_BEGIN record.
*/
if (drc->drc_next_rrd != NULL)
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
/*
* The objset will be invalidated by dmu_recv_end() when we do
* dsl_dataset_clone_swap_sync_impl().
*/
drc->drc_os = NULL;
kmem_free(rwa, sizeof (*rwa));
nvlist_free(drc->drc_begin_nvl);
if (err != 0) {
/*
* Clean up references. If the receive is not resumable,
* destroy what we created, so we don't leave it in
* an inconsistent state.
*/
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
}
objlist_destroy(drc->drc_ignore_objlist);
drc->drc_ignore_objlist = NULL;
*voffp = drc->drc_voff;
return (err);
}
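/*
 * The check half of the dmu_recv_end() sync task: verify that the clone swap
 * (for receives into an existing filesystem), the snapshot creation, and
 * (for raw receives) the key change can all succeed before
 * dmu_recv_end_sync() applies them.
 */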
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
int error;
ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
if (error != 0)
return (error);
if (drc->drc_force) {
/*
* We will destroy any snapshots in tofs (i.e. before
* origin_head) that are after the origin (which is
* the snap before drc_ds, because drc_ds can not
* have any snaps of its own).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
break;
if (snap->ds_dir != origin_head->ds_dir)
error = SET_ERROR(EINVAL);
if (error == 0) {
error = dsl_destroy_snapshot_check_impl(
snap, B_FALSE);
}
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_dataset_rele(snap, FTAG);
if (error != 0)
break;
}
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
if (drc->drc_keynvl != NULL) {
error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
drc->drc_keynvl, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
}
error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
origin_head, drc->drc_force, drc->drc_owner, tx);
if (error != 0) {
dsl_dataset_rele(origin_head, FTAG);
return (error);
}
error = dsl_dataset_snapshot_check_impl(origin_head,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
dsl_dataset_rele(origin_head, FTAG);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
} else {
error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
drc->drc_tosnap, tx, B_TRUE, 1,
drc->drc_cred, drc->drc_proc);
}
return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
uint64_t newsnapobj;
spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
tx, "snap=%s", drc->drc_tosnap);
drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
&origin_head));
if (drc->drc_force) {
/*
* Destroy any snapshots of drc_tofs (origin_head)
* after the origin (the snap before drc_ds).
*/
uint64_t obj;
obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
while (obj !=
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
dsl_dataset_t *snap;
VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
&snap));
ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
dsl_destroy_snapshot_sync_impl(snap,
B_FALSE, tx);
dsl_dataset_rele(snap, FTAG);
}
}
if (drc->drc_keynvl != NULL) {
dsl_crypto_recv_raw_key_sync(drc->drc_ds,
drc->drc_keynvl, tx);
nvlist_free(drc->drc_keynvl);
drc->drc_keynvl = NULL;
}
VERIFY3P(drc->drc_ds->ds_prev, ==,
origin_head->ds_prev);
dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
origin_head, tx);
/*
* The objset was evicted by dsl_dataset_clone_swap_sync_impl,
* so drc_os is no longer valid.
*/
drc->drc_os = NULL;
dsl_dataset_snapshot_sync_impl(origin_head,
drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
dsl_dataset_phys(origin_head)->ds_flags &=
~DS_FLAG_INCONSISTENT;
newsnapobj =
dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
dsl_dataset_rele(origin_head, FTAG);
dsl_destroy_head_sync_impl(drc->drc_ds, tx);
if (drc->drc_owner != NULL)
VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
} else {
dsl_dataset_t *ds = drc->drc_ds;
dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
/* set snapshot's creation time and guid */
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
drc->drc_drrb->drr_creation_time;
dsl_dataset_phys(ds->ds_prev)->ds_guid =
drc->drc_drrb->drr_toguid;
dsl_dataset_phys(ds->ds_prev)->ds_flags &=
~DS_FLAG_INCONSISTENT;
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
if (dsl_dataset_has_resume_receive_state(ds)) {
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, tx);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
}
newsnapobj =
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
}
/*
* If this is a raw receive, the crypt_keydata nvlist will include
* a to_ivset_guid for us to set on the new snapshot. This value
* will override the value generated by the snapshot code. However,
* this value may not be present, because older implementations of
* the raw send code did not include this value, and we are still
* allowed to receive them if the zfs_disable_ivset_guid_check
* tunable is set, in which case we will leave the newly-generated
* value.
*/
if (drc->drc_raw && drc->drc_ivset_guid != 0) {
dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
&drc->drc_ivset_guid, tx));
}
/*
* Release the hold from dmu_recv_begin. This must be done before
* we return to open context, so that when we free the dataset's dnode
* we can evict its bonus buffer. Since the dataset may be destroyed
* at this point (and therefore won't have a valid pointer to the spa)
* we release the key mapping manually here while we do have a valid
* pointer, if it exists.
*/
if (!drc->drc_raw && encrypted) {
(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
drc->drc_ds->ds_object, drc->drc_ds);
}
dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
drc->drc_ds = NULL;
}
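/* Number of blocks the dmu_recv_end() sync task is expected to modify. */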
static int dmu_recv_end_modified_blocks = 3;
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
/*
* We will be destroying the ds; make sure its origin is unmounted if
* necessary.
*/
char name[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(drc->drc_ds, name);
zfs_destroy_unmount_origin(name);
#endif
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
return (dsl_sync_task(drc->drc_tofs,
dmu_recv_end_check, dmu_recv_end_sync, drc,
dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
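/*
 * Finish the receive: run the appropriate end sync task, then on success
 * create zvol minors for the target filesystem (if new) and the new snapshot;
 * on failure, clean up the temporary dataset and free the key nvlist.
 */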
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
int error;
drc->drc_owner = owner;
if (drc->drc_newfs)
error = dmu_recv_new_end(drc);
else
error = dmu_recv_existing_end(drc);
if (error != 0) {
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
} else {
if (drc->drc_newfs) {
zvol_create_minor(drc->drc_tofs);
}
char *snapname = kmem_asprintf("%s@%s",
drc->drc_tofs, drc->drc_tosnap);
zvol_create_minor(snapname);
kmem_strfree(snapname);
}
return (error);
}
/*
* Return TRUE if this objset is currently being received into.
*/
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
return (os->os_dsl_dataset != NULL &&
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW,
"Maximum receive queue length");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW,
"Receive queue fill fraction");
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW,
"Maximum amount of writes to batch into one transaction");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_redact.c b/sys/contrib/openzfs/module/zfs/dmu_redact.c
index fdbdf7d6e868..7efe423d35f0 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_redact.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_redact.c
@@ -1,1201 +1,1201 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, 2018 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/txg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_redact.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/dmu_tx.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#endif
/*
* This controls the number of entries in the buffer the redaction_list_update
* synctask uses to buffer writes to the redaction list.
*/
int redact_sync_bufsize = 1024;
/*
* Controls how often to update the redaction list when creating a redaction
* list.
*/
uint64_t redaction_list_update_interval_ns = 1000 * 1000 * 1000ULL; /* NS */
/*
* This tunable controls the length of the queues that zfs redact worker threads
* use to communicate. If the dmu_redact_snap thread is blocking on these
* queues, this variable may need to be increased. If there is a significant
* slowdown at the start of a redact operation as these threads consume all the
* available IO resources, or the queues are consuming too much memory, this
* variable may need to be decreased.
*/
int zfs_redact_queue_length = 1024 * 1024;
/*
* These tunables control the fill fraction of the queues by zfs redact. The
* fill fraction controls the frequency with which threads have to be
* cv_signaled. If a lot of cpu time is being spent on cv_signal, then these
* should be tuned down. If the queues empty before the signalled thread can
* catch up, then these should be tuned up.
*/
uint64_t zfs_redact_queue_ff = 20;
struct redact_record {
bqueue_node_t ln;
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t start_object;
uint64_t start_blkid;
uint64_t end_object;
uint64_t end_blkid;
uint8_t indblkshift;
uint32_t datablksz;
};
struct redact_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
dsl_dataset_t *ds; /* Dataset to traverse */
struct redact_record *current_record;
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
objlist_t *deleted_objs;
uint64_t *num_blocks_visited;
uint64_t ignore_object; /* ignore further callbacks on this */
uint64_t txg; /* txg to traverse since */
};
/*
* The redaction node is a wrapper around the redaction record that is used
* by the redaction merging thread to sort the records and determine overlaps.
*
* It contains two nodes; one sorts the records by their start_zb, and the other
* sorts the records by their end_zb.
*/
struct redact_node {
avl_node_t avl_node_start;
avl_node_t avl_node_end;
struct redact_record *record;
struct redact_thread_arg *rt_arg;
uint32_t thread_num;
};
struct merge_data {
list_t md_redact_block_pending;
redact_block_phys_t md_coalesce_block;
uint64_t md_last_time;
redact_block_phys_t md_furthest[TXG_SIZE];
/* Lists of struct redact_block_list_node. */
list_t md_blocks[TXG_SIZE];
boolean_t md_synctask_txg[TXG_SIZE];
uint64_t md_latest_synctask_txg;
redaction_list_t *md_redaction_list;
};
/*
* A wrapper around struct redact_block so it can be stored in a list_t.
*/
struct redact_block_list_node {
redact_block_phys_t block;
list_node_t node;
};
/*
* We've found a new redaction candidate. In order to improve performance, we
* coalesce these blocks when they're adjacent to each other. This function
* handles that. If the new candidate block range is immediately after the
* range we're building, coalesce it into the range we're building. Otherwise,
* put the record we're building on the queue, and update the build pointer to
* point to the new record.
*/
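/*
 * For example, a record covering blocks [10, 19] of object 5 followed by a
 * record covering blocks [20, 29] of object 5 is coalesced into a single
 * [10, 29] record; likewise, a record ending at (object 5, blkid UINT64_MAX)
 * is coalesced with a record that starts at (object 6, blkid 0).
 */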
static void
record_merge_enqueue(bqueue_t *q, struct redact_record **build,
struct redact_record *new)
{
if (new->eos_marker) {
if (*build != NULL)
bqueue_enqueue(q, *build, sizeof (*build));
bqueue_enqueue_flush(q, new, sizeof (*new));
return;
}
if (*build == NULL) {
*build = new;
return;
}
struct redact_record *curbuild = *build;
if ((curbuild->end_object == new->start_object &&
curbuild->end_blkid + 1 == new->start_blkid &&
curbuild->end_blkid != UINT64_MAX) ||
(curbuild->end_object + 1 == new->start_object &&
curbuild->end_blkid == UINT64_MAX && new->start_blkid == 0)) {
curbuild->end_object = new->end_object;
curbuild->end_blkid = new->end_blkid;
kmem_free(new, sizeof (*new));
} else {
bqueue_enqueue(q, curbuild, sizeof (*curbuild));
*build = new;
}
}
#ifdef _KERNEL
struct objnode {
avl_node_t node;
uint64_t obj;
};
static int
objnode_compare(const void *o1, const void *o2)
{
const struct objnode *obj1 = o1;
const struct objnode *obj2 = o2;
if (obj1->obj < obj2->obj)
return (-1);
if (obj1->obj > obj2->obj)
return (1);
return (0);
}
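/*
 * Build a sorted objlist of every object currently on the ZPL unlinked
 * ("delete queue") set, so the traversal threads can treat those objects as
 * already deleted even though their dnodes still exist on disk.
 */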
static objlist_t *
zfs_get_deleteq(objset_t *os)
{
objlist_t *deleteq_objlist = objlist_create();
uint64_t deleteq_obj;
zap_cursor_t zc;
zap_attribute_t za;
dmu_object_info_t doi;
ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
VERIFY0(dmu_object_info(os, MASTER_NODE_OBJ, &doi));
ASSERT3U(doi.doi_type, ==, DMU_OT_MASTER_NODE);
VERIFY0(zap_lookup(os, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
/*
* In order to insert objects into the objlist, they must be in sorted
* order. We don't know what order we'll get them out of the ZAP in, so
* we insert them into and remove them from an avl_tree_t to sort them.
*/
avl_tree_t at;
avl_create(&at, objnode_compare, sizeof (struct objnode),
offsetof(struct objnode, node));
for (zap_cursor_init(&zc, os, deleteq_obj);
zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
struct objnode *obj = kmem_zalloc(sizeof (*obj), KM_SLEEP);
obj->obj = za.za_first_integer;
avl_add(&at, obj);
}
zap_cursor_fini(&zc);
struct objnode *next, *found = avl_first(&at);
while (found != NULL) {
next = AVL_NEXT(&at, found);
objlist_insert(deleteq_objlist, found->obj);
found = next;
}
void *cookie = NULL;
while ((found = avl_destroy_nodes(&at, &cookie)) != NULL)
kmem_free(found, sizeof (*found));
avl_destroy(&at);
return (deleteq_objlist);
}
#endif
/*
* This is the callback function to traverse_dataset for the redaction threads
* for dmu_redact_snap. This thread is responsible for creating redaction
* records for all the data that is modified by the snapshots we're redacting
* with respect to. Redaction records represent ranges of data that have been
* modified by one of the redaction snapshots, and are stored in the
* redact_record struct. We need to create redaction records for three
* cases:
*
* First, if there's a normal write, we need to create a redaction record for
* that block.
*
* Second, if there's a hole, we need to create a redaction record that covers
* the whole range of the hole. If the hole is in the meta-dnode, it must cover
* every block in all of the objects in the hole.
*
* Third, if there is a deleted object, we need to create a redaction record for
* all of the blocks in that object.
*/
-/*ARGSUSED*/
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
+ (void) spa, (void) zilog;
struct redact_thread_arg *rta = arg;
struct redact_record *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= rta->resume.zb_object);
if (rta->cancel)
return (SET_ERROR(EINTR));
if (rta->ignore_object == zb->zb_object)
return (0);
/*
* If we're visiting a dnode, we need to handle the case where the
* object has been deleted.
*/
if (zb->zb_level == ZB_DNODE_LEVEL) {
ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
if (zb->zb_object == 0)
return (0);
/*
* If the object has been deleted, redact all of the blocks in
* it.
*/
if (dnp->dn_type == DMU_OT_NONE ||
objlist_exists(rta->deleted_objs, zb->zb_object)) {
rta->ignore_object = zb->zb_object;
record = kmem_zalloc(sizeof (struct redact_record),
KM_SLEEP);
record->eos_marker = B_FALSE;
record->start_object = record->end_object =
zb->zb_object;
record->start_blkid = 0;
record->end_blkid = UINT64_MAX;
record_merge_enqueue(&rta->q,
&rta->current_record, record);
}
return (0);
} else if (zb->zb_level < 0) {
return (0);
} else if (zb->zb_level > 0 && !BP_IS_HOLE(bp)) {
/*
* If this is an indirect block, but not a hole, it doesn't
* provide any useful information for redaction, so ignore it.
*/
return (0);
}
/*
* At this point, there are two options left for the type of block we're
* looking at. Either this is a hole (which could be in the dnode or
* the meta-dnode), or it's a level 0 block of some sort. If it's a
* hole, we create a redaction record that covers the whole range. If
* the hole is in a dnode, we need to redact all the blocks in that
* hole. If the hole is in the meta-dnode, we instead need to redact
* all blocks in every object covered by that hole. If it's a level 0
* block, we only need to redact that single block.
*/
record = kmem_zalloc(sizeof (struct redact_record), KM_SLEEP);
record->eos_marker = B_FALSE;
record->start_object = record->end_object = zb->zb_object;
if (BP_IS_HOLE(bp)) {
record->start_blkid = zb->zb_blkid *
bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
record->end_blkid = ((zb->zb_blkid + 1) *
bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level)) - 1;
if (zb->zb_object == DMU_META_DNODE_OBJECT) {
record->start_object = record->start_blkid *
((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
sizeof (dnode_phys_t));
record->start_blkid = 0;
record->end_object = ((record->end_blkid +
1) * ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
sizeof (dnode_phys_t))) - 1;
record->end_blkid = UINT64_MAX;
}
} else if (zb->zb_level != 0 ||
zb->zb_object == DMU_META_DNODE_OBJECT) {
kmem_free(record, sizeof (*record));
return (0);
} else {
record->start_blkid = record->end_blkid = zb->zb_blkid;
}
record->indblkshift = dnp->dn_indblkshift;
record->datablksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
record_merge_enqueue(&rta->q, &rta->current_record, record);
return (0);
}
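/*
 * Per-snapshot traversal thread: collect the delete queue (for ZPL datasets),
 * walk the dataset from rt_arg->txg with redact_cb() to generate redaction
 * records, and finish by enqueueing an end-of-stream record.
 */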
static void
redact_traverse_thread(void *arg)
{
struct redact_thread_arg *rt_arg = arg;
int err;
struct redact_record *data;
#ifdef _KERNEL
if (rt_arg->os->os_phys->os_type == DMU_OST_ZFS)
rt_arg->deleted_objs = zfs_get_deleteq(rt_arg->os);
else
rt_arg->deleted_objs = objlist_create();
#else
rt_arg->deleted_objs = objlist_create();
#endif
err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg,
&rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
redact_cb, rt_arg);
if (err != EINTR)
rt_arg->error_code = err;
objlist_destroy(rt_arg->deleted_objs);
data = kmem_zalloc(sizeof (*data), KM_SLEEP);
data->eos_marker = B_TRUE;
record_merge_enqueue(&rt_arg->q, &rt_arg->current_record, data);
thread_exit();
}
static inline void
create_zbookmark_from_obj_off(zbookmark_phys_t *zb, uint64_t object,
uint64_t blkid)
{
zb->zb_object = object;
zb->zb_level = 0;
zb->zb_blkid = blkid;
}
/*
* This is a utility function that can do the comparison for the start or ends
* of the ranges in a redact_record.
*/
static int
redact_range_compare(uint64_t obj1, uint64_t off1, uint32_t dbss1,
uint64_t obj2, uint64_t off2, uint32_t dbss2)
{
zbookmark_phys_t z1, z2;
create_zbookmark_from_obj_off(&z1, obj1, off1);
create_zbookmark_from_obj_off(&z2, obj2, off2);
return (zbookmark_compare(dbss1 >> SPA_MINBLOCKSHIFT, 0,
dbss2 >> SPA_MINBLOCKSHIFT, 0, &z1, &z2));
}
/*
* Compare two redaction records by their range's start location. Also makes
* eos records always compare last. We use the thread number in the redact_node
* to ensure that records do not compare equal (which is not allowed in our avl
* trees).
*/
static int
redact_node_compare_start(const void *arg1, const void *arg2)
{
const struct redact_node *rn1 = arg1;
const struct redact_node *rn2 = arg2;
const struct redact_record *rr1 = rn1->record;
const struct redact_record *rr2 = rn2->record;
if (rr1->eos_marker)
return (1);
if (rr2->eos_marker)
return (-1);
int cmp = redact_range_compare(rr1->start_object, rr1->start_blkid,
rr1->datablksz, rr2->start_object, rr2->start_blkid,
rr2->datablksz);
if (cmp == 0)
cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
return (cmp);
}
/*
* Compare two redaction records by their range's end location. Also makes
* eos records always compare last. We use the thread number in the redact_node
* to ensure that records do not compare equal (which is not allowed in our avl
* trees).
*/
static int
redact_node_compare_end(const void *arg1, const void *arg2)
{
const struct redact_node *rn1 = arg1;
const struct redact_node *rn2 = arg2;
const struct redact_record *srr1 = rn1->record;
const struct redact_record *srr2 = rn2->record;
if (srr1->eos_marker)
return (1);
if (srr2->eos_marker)
return (-1);
int cmp = redact_range_compare(srr1->end_object, srr1->end_blkid,
srr1->datablksz, srr2->end_object, srr2->end_blkid,
srr2->datablksz);
if (cmp == 0)
cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
return (cmp);
}
/*
* Utility function that compares two redaction records to determine if any part
* of the "from" record is before any part of the "to" record. Also causes End
* of Stream redaction records to compare after all others, so that the
* redaction merging logic can stay simple.
*/
static boolean_t
redact_record_before(const struct redact_record *from,
const struct redact_record *to)
{
if (from->eos_marker == B_TRUE)
return (B_FALSE);
else if (to->eos_marker == B_TRUE)
return (B_TRUE);
return (redact_range_compare(from->start_object, from->start_blkid,
from->datablksz, to->end_object, to->end_blkid,
to->datablksz) <= 0);
}
/*
* Pop a new redaction record off the queue, check that the records are in the
* right order, and free the old data.
*/
static struct redact_record *
get_next_redact_record(bqueue_t *bq, struct redact_record *prev)
{
struct redact_record *next = bqueue_dequeue(bq);
ASSERT(redact_record_before(prev, next));
kmem_free(prev, sizeof (*prev));
return (next);
}
/*
* Remove the given redaction node from both trees, pull a new redaction record
* off the queue, free the old redaction record, update the redaction node, and
* reinsert the node into the trees.
*/
static int
update_avl_trees(avl_tree_t *start_tree, avl_tree_t *end_tree,
struct redact_node *redact_node)
{
avl_remove(start_tree, redact_node);
avl_remove(end_tree, redact_node);
redact_node->record = get_next_redact_record(&redact_node->rt_arg->q,
redact_node->record);
avl_add(end_tree, redact_node);
avl_add(start_tree, redact_node);
return (redact_node->rt_arg->error_code);
}
/*
* Synctask for updating redaction lists. We first take this txg's list of
* redacted blocks and append those to the redaction list. We then update the
* redaction list's bonus buffer. We store the furthest blocks we visited and
* the list of snapshots that we're redacting with respect to. We need these so
* that redacted sends and receives can be correctly resumed.
*/
static void
redaction_list_update_sync(void *arg, dmu_tx_t *tx)
{
struct merge_data *md = arg;
uint64_t txg = dmu_tx_get_txg(tx);
list_t *list = &md->md_blocks[txg & TXG_MASK];
redact_block_phys_t *furthest_visited =
&md->md_furthest[txg & TXG_MASK];
objset_t *mos = tx->tx_pool->dp_meta_objset;
redaction_list_t *rl = md->md_redaction_list;
int bufsize = redact_sync_bufsize;
redact_block_phys_t *buf = kmem_alloc(bufsize * sizeof (*buf),
KM_SLEEP);
int index = 0;
dmu_buf_will_dirty(rl->rl_dbuf, tx);
for (struct redact_block_list_node *rbln = list_remove_head(list);
rbln != NULL; rbln = list_remove_head(list)) {
ASSERT3U(rbln->block.rbp_object, <=,
furthest_visited->rbp_object);
ASSERT(rbln->block.rbp_object < furthest_visited->rbp_object ||
rbln->block.rbp_blkid <= furthest_visited->rbp_blkid);
buf[index] = rbln->block;
index++;
if (index == bufsize) {
dmu_write(mos, rl->rl_object,
rl->rl_phys->rlp_num_entries * sizeof (*buf),
bufsize * sizeof (*buf), buf, tx);
rl->rl_phys->rlp_num_entries += bufsize;
index = 0;
}
kmem_free(rbln, sizeof (*rbln));
}
if (index > 0) {
dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
sizeof (*buf), index * sizeof (*buf), buf, tx);
rl->rl_phys->rlp_num_entries += index;
}
kmem_free(buf, bufsize * sizeof (*buf));
md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;
rl->rl_phys->rlp_last_object = furthest_visited->rbp_object;
rl->rl_phys->rlp_last_blkid = furthest_visited->rbp_blkid;
}
static void
commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
uint64_t blkid)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
if (!md->md_synctask_txg[txg & TXG_MASK]) {
dsl_sync_task_nowait(dmu_tx_pool(tx),
redaction_list_update_sync, md, tx);
md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;
md->md_latest_synctask_txg = txg;
}
md->md_furthest[txg & TXG_MASK].rbp_object = object;
md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;
list_move_tail(&md->md_blocks[txg & TXG_MASK],
&md->md_redact_block_pending);
dmu_tx_commit(tx);
md->md_last_time = gethrtime();
}
/*
* We want to store the list of blocks that we're redacting in the bookmark's
* redaction list. However, this list is stored in the MOS, which means it can
* only be written to in syncing context. To get around this, we create a
* synctask that will write to the MOS for us. We tell it what to write via a
* linked list kept per transaction group; every time we decide to redact a
* block, we append it to the list for the transaction group that is currently
* in open context. We also update some progress information that the synctask
* will store to enable resumable redacted sends.
*/
static void
update_redaction_list(struct merge_data *md, objset_t *os,
uint64_t object, uint64_t blkid, uint64_t endblkid, uint32_t blksz)
{
boolean_t enqueue = B_FALSE;
redact_block_phys_t cur = {0};
uint64_t count = endblkid - blkid + 1;
while (count > REDACT_BLOCK_MAX_COUNT) {
update_redaction_list(md, os, object, blkid,
blkid + REDACT_BLOCK_MAX_COUNT - 1, blksz);
blkid += REDACT_BLOCK_MAX_COUNT;
count -= REDACT_BLOCK_MAX_COUNT;
}
redact_block_phys_t *coalesce = &md->md_coalesce_block;
boolean_t new;
if (coalesce->rbp_size_count == 0) {
new = B_TRUE;
enqueue = B_FALSE;
} else {
uint64_t old_count = redact_block_get_count(coalesce);
if (coalesce->rbp_object == object &&
coalesce->rbp_blkid + old_count == blkid &&
old_count + count <= REDACT_BLOCK_MAX_COUNT) {
ASSERT3U(redact_block_get_size(coalesce), ==, blksz);
redact_block_set_count(coalesce, old_count + count);
new = B_FALSE;
enqueue = B_FALSE;
} else {
new = B_TRUE;
enqueue = B_TRUE;
}
}
if (new) {
cur = *coalesce;
coalesce->rbp_blkid = blkid;
coalesce->rbp_object = object;
redact_block_set_count(coalesce, count);
redact_block_set_size(coalesce, blksz);
}
if (enqueue && redact_block_get_size(&cur) != 0) {
struct redact_block_list_node *rbln =
kmem_alloc(sizeof (struct redact_block_list_node),
KM_SLEEP);
rbln->block = cur;
list_insert_tail(&md->md_redact_block_pending, rbln);
}
if (gethrtime() > md->md_last_time +
redaction_list_update_interval_ns) {
commit_rl_updates(os, md, object, blkid);
}
}
/*
* This thread merges all the redaction records provided by the worker threads,
* and determines which blocks are redacted by all the snapshots. The algorithm
* for doing so is similar to performing a merge in mergesort with n sub-lists
* instead of 2, with some added complexity due to the fact that the entries are
* ranges, not just single blocks. This algorithm relies on the fact that the
* queues are sorted, which is ensured by the fact that traverse_dataset
* traverses the dataset in a consistent order. We pull one entry off the front
* of the queues of each secure dataset traversal thread. Then we repeat the
* following: each record represents a range of blocks modified by one of the
* redaction snapshots, and each block in that range may need to be redacted in
* the send stream. Find the record with the latest start of its range, and the
* record with the earliest end of its range. If the last start is before the
* first end, then we know that the blocks in the range [last_start, first_end]
* are covered by all of the ranges at the front of the queues, which means
* every thread redacts that whole range. For example, let's say the ranges on
* each queue look like this:
*
* Block Id 1 2 3 4 5 6 7 8 9 10 11
* Thread 1 | [====================]
* Thread 2 | [========]
* Thread 3 | [=================]
*
 * Thread 3 has the last start (5), and Thread 2 has the first end (6). All
* three threads modified the range [5,6], so that data should not be sent over
* the wire. After we've determined whether or not to redact anything, we take
* the record with the first end. We discard that record, and pull a new one
* off the front of the queue it came from. In the above example, we would
* discard Thread 2's record, and pull a new one. Let's say the next record we
* pulled from Thread 2 covered range [10,11]. The new layout would look like
* this:
*
* Block Id 1 2 3 4 5 6 7 8 9 10 11
* Thread 1 | [====================]
* Thread 2 | [==]
* Thread 3 | [=================]
*
* When we compare the last start (10, from Thread 2) and the first end (9, from
* Thread 1), we see that the last start is greater than the first end.
* Therefore, we do not redact anything from these records. We'll iterate by
* replacing the record from Thread 1.
*
* We iterate by replacing the record with the lowest end because we know
* that the record with the lowest end has helped us as much as it can. All the
* ranges before it that we will ever redact have been redacted. In addition,
* by replacing the one with the lowest end, we guarantee we catch all ranges
* that need to be redacted. For example, if in the case above we had replaced
* the record from Thread 1 instead, we might have ended up with the following:
*
* Block Id 1 2 3 4 5 6 7 8 9 10 11 12
* Thread 1 | [==]
* Thread 2 | [========]
* Thread 3 | [=================]
*
* If the next record from Thread 2 had been [8,10], for example, we should have
* redacted part of that range, but because we updated Thread 1's record, we
* missed it.
*
* We implement this algorithm by using two trees. The first sorts the
* redaction records by their start_zb, and the second sorts them by their
* end_zb. We use these to find the record with the last start and the record
* with the first end. We create a record with that start and end, and send it
* on. The overall runtime of this implementation is O(n log m), where n is the
* total number of redaction records from all the different redaction snapshots,
* and m is the number of redaction snapshots.
*
 * If we redact with respect to zero snapshots, we create a redaction
 * record with the start object and blkid set to 0, and the end object and
 * blkid set to UINT64_MAX. This will result in us redacting every block.
*/
static int
perform_thread_merge(bqueue_t *q, uint32_t num_threads,
struct redact_thread_arg *thread_args, boolean_t *cancel)
{
struct redact_node *redact_nodes = NULL;
avl_tree_t start_tree, end_tree;
struct redact_record *record;
struct redact_record *current_record = NULL;
int err = 0;
struct merge_data md = { {0} };
list_create(&md.md_redact_block_pending,
sizeof (struct redact_block_list_node),
offsetof(struct redact_block_list_node, node));
/*
* If we're redacting with respect to zero snapshots, then no data is
 * permitted to be sent. We enqueue a single record that redacts all
 * blocks; the caller will enqueue the eos marker after we return.
*/
if (num_threads == 0) {
record = kmem_zalloc(sizeof (struct redact_record),
KM_SLEEP);
// We can't redact object 0, so don't try.
record->start_object = 1;
record->start_blkid = 0;
record->end_object = record->end_blkid = UINT64_MAX;
bqueue_enqueue(q, record, sizeof (*record));
return (0);
}
if (num_threads > 0) {
redact_nodes = kmem_zalloc(num_threads *
sizeof (*redact_nodes), KM_SLEEP);
}
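/*
 * Build two AVL trees over the per-thread redact_node structs, one sorted
 * by range start and one by range end, so that each iteration can cheaply
 * find the record with the last start and the record with the first end.
 */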
avl_create(&start_tree, redact_node_compare_start,
sizeof (struct redact_node),
offsetof(struct redact_node, avl_node_start));
avl_create(&end_tree, redact_node_compare_end,
sizeof (struct redact_node),
offsetof(struct redact_node, avl_node_end));
for (int i = 0; i < num_threads; i++) {
struct redact_node *node = &redact_nodes[i];
struct redact_thread_arg *targ = &thread_args[i];
node->record = bqueue_dequeue(&targ->q);
node->rt_arg = targ;
node->thread_num = i;
avl_add(&start_tree, node);
avl_add(&end_tree, node);
}
/*
* Once the first record in the end tree has returned EOS, every record
* must be an EOS record, so we should stop.
*/
while (err == 0 && !((struct redact_node *)avl_first(&end_tree))->
record->eos_marker) {
if (*cancel) {
err = EINTR;
break;
}
struct redact_node *last_start = avl_last(&start_tree);
struct redact_node *first_end = avl_first(&end_tree);
/*
* If the last start record is before the first end record,
* then we have blocks that are redacted by all threads.
* Therefore, we should redact them. Copy the record, and send
* it to the main thread.
*/
if (redact_record_before(last_start->record,
first_end->record)) {
record = kmem_zalloc(sizeof (struct redact_record),
KM_SLEEP);
*record = *first_end->record;
record->start_object = last_start->record->start_object;
record->start_blkid = last_start->record->start_blkid;
record_merge_enqueue(q, &current_record,
record);
}
err = update_avl_trees(&start_tree, &end_tree, first_end);
}
/*
* We're done; if we were cancelled, we need to cancel our workers and
* clear out their queues. Either way, we need to remove every thread's
* redact_node struct from the avl trees.
*/
for (int i = 0; i < num_threads; i++) {
if (err != 0) {
thread_args[i].cancel = B_TRUE;
while (!redact_nodes[i].record->eos_marker) {
(void) update_avl_trees(&start_tree, &end_tree,
&redact_nodes[i]);
}
}
avl_remove(&start_tree, &redact_nodes[i]);
avl_remove(&end_tree, &redact_nodes[i]);
kmem_free(redact_nodes[i].record,
sizeof (struct redact_record));
bqueue_destroy(&thread_args[i].q);
}
avl_destroy(&start_tree);
avl_destroy(&end_tree);
kmem_free(redact_nodes, num_threads * sizeof (*redact_nodes));
if (current_record != NULL)
bqueue_enqueue(q, current_record, sizeof (*current_record));
return (err);
}
struct redact_merge_thread_arg {
bqueue_t q;
spa_t *spa;
int numsnaps;
struct redact_thread_arg *thr_args;
boolean_t cancel;
int error_code;
};
static void
redact_merge_thread(void *arg)
{
struct redact_merge_thread_arg *rmta = arg;
rmta->error_code = perform_thread_merge(&rmta->q,
rmta->numsnaps, rmta->thr_args, &rmta->cancel);
struct redact_record *rec = kmem_zalloc(sizeof (*rec), KM_SLEEP);
rec->eos_marker = B_TRUE;
bqueue_enqueue_flush(&rmta->q, rec, 1);
thread_exit();
}
/*
* Find the next object in or after the redaction range passed in, and hold
* its dnode with the provided tag. Also update *object to contain the new
* object number.
*/
static int
hold_next_object(objset_t *os, struct redact_record *rec, void *tag,
uint64_t *object, dnode_t **dn)
{
int err = 0;
if (*dn != NULL)
dnode_rele(*dn, tag);
*dn = NULL;
if (*object < rec->start_object) {
*object = rec->start_object - 1;
}
err = dmu_object_next(os, object, B_FALSE, 0);
if (err != 0)
return (err);
err = dnode_hold(os, *object, tag, dn);
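/* Skip objects that precede the record's range or that hold metadata. */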
while (err == 0 && (*object < rec->start_object ||
DMU_OT_IS_METADATA((*dn)->dn_type))) {
dnode_rele(*dn, tag);
*dn = NULL;
err = dmu_object_next(os, object, B_FALSE, 0);
if (err != 0)
break;
err = dnode_hold(os, *object, tag, dn);
}
return (err);
}
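/*
 * Consume the merged records produced by the merge thread and, for each
 * object they cover, append the redacted block ranges to the redaction
 * list via update_redaction_list(). Before returning, flush any partially
 * coalesced block and wait for the final synctask to sync out.
 */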
static int
perform_redaction(objset_t *os, redaction_list_t *rl,
struct redact_merge_thread_arg *rmta)
{
int err = 0;
bqueue_t *q = &rmta->q;
struct redact_record *rec = NULL;
struct merge_data md = { {0} };
list_create(&md.md_redact_block_pending,
sizeof (struct redact_block_list_node),
offsetof(struct redact_block_list_node, node));
md.md_redaction_list = rl;
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&md.md_blocks[i],
sizeof (struct redact_block_list_node),
offsetof(struct redact_block_list_node, node));
}
dnode_t *dn = NULL;
uint64_t prev_obj = 0;
for (rec = bqueue_dequeue(q); !rec->eos_marker && err == 0;
rec = get_next_redact_record(q, rec)) {
ASSERT3U(rec->start_object, !=, 0);
uint64_t object;
if (prev_obj != rec->start_object) {
object = rec->start_object - 1;
err = hold_next_object(os, rec, FTAG, &object, &dn);
} else {
object = prev_obj;
}
while (err == 0 && object <= rec->end_object) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
err = EINTR;
break;
}
/*
* Part of the current object is contained somewhere in
* the range covered by rec.
*/
uint64_t startblkid;
uint64_t endblkid;
uint64_t maxblkid = dn->dn_phys->dn_maxblkid;
if (rec->start_object < object)
startblkid = 0;
else if (rec->start_blkid > maxblkid)
break;
else
startblkid = rec->start_blkid;
if (rec->end_object > object || rec->end_blkid >
maxblkid) {
endblkid = maxblkid;
} else {
endblkid = rec->end_blkid;
}
update_redaction_list(&md, os, object, startblkid,
endblkid, dn->dn_datablksz);
if (object == rec->end_object)
break;
err = hold_next_object(os, rec, FTAG, &object, &dn);
}
if (err == ESRCH)
err = 0;
if (dn != NULL)
prev_obj = object;
}
if (err == 0 && dn != NULL)
dnode_rele(dn, FTAG);
if (err == ESRCH)
err = 0;
rmta->cancel = B_TRUE;
while (!rec->eos_marker)
rec = get_next_redact_record(q, rec);
kmem_free(rec, sizeof (*rec));
/*
* There may be a block that's being coalesced, sync that out before we
* return.
*/
if (err == 0 && md.md_coalesce_block.rbp_size_count != 0) {
struct redact_block_list_node *rbln =
kmem_alloc(sizeof (struct redact_block_list_node),
KM_SLEEP);
rbln->block = md.md_coalesce_block;
list_insert_tail(&md.md_redact_block_pending, rbln);
}
commit_rl_updates(os, &md, UINT64_MAX, UINT64_MAX);
/*
* Wait for all the redaction info to sync out before we return, so that
* anyone who attempts to resume this redaction will have all the data
* they need.
*/
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
if (md.md_latest_synctask_txg != 0)
txg_wait_synced(dp, md.md_latest_synctask_txg);
for (int i = 0; i < TXG_SIZE; i++)
list_destroy(&md.md_blocks[i]);
return (err);
}
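/* Return B_TRUE if the array of snapshot guids contains the given guid. */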
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
int
dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
const char *redactbook)
{
int err = 0;
dsl_pool_t *dp = NULL;
dsl_dataset_t *ds = NULL;
int numsnaps = 0;
objset_t *os;
struct redact_thread_arg *args = NULL;
redaction_list_t *new_rl = NULL;
char *newredactbook;
if ((err = dsl_pool_hold(snapname, FTAG, &dp)) != 0)
return (err);
newredactbook = kmem_zalloc(sizeof (char) * ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
if ((err = dsl_dataset_hold_flags(dp, snapname, DS_HOLD_FLAG_DECRYPT,
FTAG, &ds)) != 0) {
goto out;
}
dsl_dataset_long_hold(ds, FTAG);
if (!ds->ds_is_snapshot || dmu_objset_from_ds(ds, &os) != 0) {
err = EINVAL;
goto out;
}
if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_REDACTED_DATASETS)) {
err = EALREADY;
goto out;
}
numsnaps = fnvlist_num_pairs(redactnvl);
if (numsnaps > 0)
args = kmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);
nvpair_t *pair = NULL;
for (int i = 0; i < numsnaps; i++) {
pair = nvlist_next_nvpair(redactnvl, pair);
const char *name = nvpair_name(pair);
struct redact_thread_arg *rta = &args[i];
err = dsl_dataset_hold_flags(dp, name, DS_HOLD_FLAG_DECRYPT,
FTAG, &rta->ds);
if (err != 0)
break;
/*
* We want to do the long hold before we can get any other
* errors, because the cleanup code will release the long
* hold if rta->ds is filled in.
*/
dsl_dataset_long_hold(rta->ds, FTAG);
err = dmu_objset_from_ds(rta->ds, &rta->os);
if (err != 0)
break;
if (!dsl_dataset_is_before(rta->ds, ds, 0)) {
err = EINVAL;
break;
}
if (dsl_dataset_feature_is_active(rta->ds,
SPA_FEATURE_REDACTED_DATASETS)) {
err = EALREADY;
break;
}
}
if (err != 0)
goto out;
VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);
boolean_t resuming = B_FALSE;
zfs_bookmark_phys_t bookmark;
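/*
 * Build the full bookmark name by replacing the '@<snapshot>' suffix of
 * the snapshot name with '#<redactbook>'.
 */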
(void) strlcpy(newredactbook, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(newredactbook, '@');
ASSERT3P(c, !=, NULL);
int n = snprintf(c, ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook),
"#%s", redactbook);
if (n >= ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook)) {
dsl_pool_rele(dp, FTAG);
kmem_free(newredactbook,
sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);
if (args != NULL)
kmem_free(args, numsnaps * sizeof (*args));
return (SET_ERROR(ENAMETOOLONG));
}
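/*
 * If the bookmark already exists, we are resuming an interrupted
 * redaction: verify that it was created against the same set of
 * redaction snapshots and that it hasn't already completed. Otherwise,
 * create a new redacted bookmark for the requested snapshots.
 */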
err = dsl_bookmark_lookup(dp, newredactbook, NULL, &bookmark);
if (err == 0) {
resuming = B_TRUE;
if (bookmark.zbm_redaction_obj == 0) {
err = EEXIST;
goto out;
}
err = dsl_redaction_list_hold_obj(dp,
bookmark.zbm_redaction_obj, FTAG, &new_rl);
if (err != 0) {
err = EIO;
goto out;
}
dsl_redaction_list_long_hold(dp, new_rl, FTAG);
if (new_rl->rl_phys->rlp_num_snaps != numsnaps) {
err = ESRCH;
goto out;
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
if (!redact_snaps_contains(new_rl->rl_phys->rlp_snaps,
new_rl->rl_phys->rlp_num_snaps,
dsl_dataset_phys(rta->ds)->ds_guid)) {
err = ESRCH;
goto out;
}
}
if (new_rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
new_rl->rl_phys->rlp_last_object == UINT64_MAX) {
err = EEXIST;
goto out;
}
dsl_pool_rele(dp, FTAG);
dp = NULL;
} else {
uint64_t *guids = NULL;
if (numsnaps > 0) {
guids = kmem_zalloc(numsnaps * sizeof (uint64_t),
KM_SLEEP);
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
guids[i] = dsl_dataset_phys(rta->ds)->ds_guid;
}
dsl_pool_rele(dp, FTAG);
dp = NULL;
err = dsl_bookmark_create_redacted(newredactbook, snapname,
numsnaps, guids, FTAG, &new_rl);
kmem_free(guids, numsnaps * sizeof (uint64_t));
if (err != 0) {
goto out;
}
}
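/*
 * Kick off one traversal thread per redaction snapshot; each feeds the
 * records it finds into its own queue for the merge thread. If we're
 * resuming, each traversal starts from the last position synced to the
 * redaction list.
 */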
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
(void) bqueue_init(&rta->q, zfs_redact_queue_ff,
zfs_redact_queue_length,
offsetof(struct redact_record, ln));
if (resuming) {
rta->resume.zb_blkid =
new_rl->rl_phys->rlp_last_blkid;
rta->resume.zb_object =
new_rl->rl_phys->rlp_last_object;
}
rta->txg = dsl_dataset_phys(ds)->ds_creation_txg;
(void) thread_create(NULL, 0, redact_traverse_thread, rta,
0, curproc, TS_RUN, minclsyspri);
}
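/*
 * Start the merge thread, which combines the per-snapshot queues into a
 * single stream of ranges redacted by every snapshot, and consume that
 * stream in perform_redaction().
 */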
struct redact_merge_thread_arg *rmta;
rmta = kmem_zalloc(sizeof (struct redact_merge_thread_arg), KM_SLEEP);
(void) bqueue_init(&rmta->q, zfs_redact_queue_ff,
zfs_redact_queue_length, offsetof(struct redact_record, ln));
rmta->numsnaps = numsnaps;
rmta->spa = os->os_spa;
rmta->thr_args = args;
(void) thread_create(NULL, 0, redact_merge_thread, rmta, 0, curproc,
TS_RUN, minclsyspri);
err = perform_redaction(os, new_rl, rmta);
bqueue_destroy(&rmta->q);
kmem_free(rmta, sizeof (struct redact_merge_thread_arg));
out:
kmem_free(newredactbook, sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);
if (new_rl != NULL) {
dsl_redaction_list_long_rele(new_rl, FTAG);
dsl_redaction_list_rele(new_rl, FTAG);
}
for (int i = 0; i < numsnaps; i++) {
struct redact_thread_arg *rta = &args[i];
/*
* rta->ds may be NULL if we got an error while filling
* it in.
*/
if (rta->ds != NULL) {
dsl_dataset_long_rele(rta->ds, FTAG);
dsl_dataset_rele_flags(rta->ds,
DS_HOLD_FLAG_DECRYPT, FTAG);
}
}
if (args != NULL)
kmem_free(args, numsnaps * sizeof (*args));
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
if (ds != NULL) {
dsl_dataset_long_rele(ds, FTAG);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
}
return (SET_ERROR(err));
}
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index 2f2fd4c3d6c8..390afba92680 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -1,3107 +1,3110 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
/*
* This tunable controls the amount of data (measured in bytes) that will be
* prefetched by zfs send. If the main thread is blocking on reads that haven't
* completed, this variable might need to be increased. If instead the main
* thread is issuing new reads because the prefetches have fallen out of the
* cache, this may need to be decreased.
*/
int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
* This tunable controls the length of the queues that zfs send worker threads
* use to communicate. If the send_main_thread is blocking on these queues,
* this variable may need to be increased. If there is a significant slowdown
* at the start of a send as these threads consume all the available IO
* resources, this variable may need to be decreased.
*/
int zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
 * These tunables control the fill fraction of the queues used by zfs send. The fill
* fraction controls the frequency with which threads have to be cv_signaled.
* If a lot of cpu time is being spent on cv_signal, then these should be tuned
* down. If the queues empty before the signalled thread can catch up, then
* these should be tuned up.
*/
int zfs_send_queue_ff = 20;
int zfs_send_no_prefetch_queue_ff = 20;
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
int zfs_override_estimate_recordsize = 0;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;
/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
int zfs_send_unmodified_spill_blocks = B_TRUE;
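/*
 * Multiply a and b, storing the result in *c. Returns B_FALSE if the
 * multiplication would overflow a uint64_t, B_TRUE otherwise.
 */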
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
uint64_t temp = a * b;
if (b != 0 && temp / b != a)
return (B_FALSE);
*c = temp;
return (B_TRUE);
}
struct send_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
uint64_t fromtxg; /* Traverse from this txg */
int flags; /* flags to pass to traverse_dataset */
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
uint64_t *num_blocks_visited;
};
struct redact_list_thread_arg {
boolean_t cancel;
bqueue_t q;
zbookmark_phys_t resume;
redaction_list_t *rl;
boolean_t mark_redact;
int error_code;
uint64_t *num_blocks_visited;
};
struct send_merge_thread_arg {
bqueue_t q;
objset_t *os;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *redact_arg;
int error;
boolean_t cancel;
};
struct send_range {
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t object;
uint64_t start_blkid;
uint64_t end_blkid;
bqueue_node_t ln;
enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
PREVIOUSLY_REDACTED} type;
union {
struct srd {
dmu_object_type_t obj_type;
uint32_t datablksz; // logical size
uint32_t datasz; // payload size
blkptr_t bp;
arc_buf_t *abuf;
abd_t *abd;
kmutex_t lock;
kcondvar_t cv;
boolean_t io_outstanding;
boolean_t io_compressed;
int io_err;
} data;
struct srh {
uint32_t datablksz;
} hole;
struct sro {
/*
* This is a pointer because embedding it in the
* struct causes these structures to be massively larger
* for all range types; this makes the code much less
* memory efficient.
*/
dnode_phys_t *dnp;
blkptr_t bp;
} object;
struct srr {
uint32_t datablksz;
} redact;
struct sror {
blkptr_t bp;
} object_range;
} sru;
};
/*
* The list of data whose inclusion in a send stream can be pending from
* one call to backup_cb to another. Multiple calls to dump_free(),
* dump_freeobjects(), and dump_redact() can be aggregated into a single
* DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
*/
typedef enum {
PENDING_NONE,
PENDING_FREE,
PENDING_FREEOBJECTS,
PENDING_REDACT
} dmu_pendop_t;
typedef struct dmu_send_cookie {
dmu_replay_record_t *dsc_drr;
dmu_send_outparams_t *dsc_dso;
offset_t *dsc_off;
objset_t *dsc_os;
zio_cksum_t dsc_zc;
uint64_t dsc_toguid;
uint64_t dsc_fromtxg;
int dsc_err;
dmu_pendop_t dsc_pending_op;
uint64_t dsc_featureflags;
uint64_t dsc_last_data_object;
uint64_t dsc_last_data_offset;
uint64_t dsc_resume_object;
uint64_t dsc_resume_offset;
boolean_t dsc_sent_begin;
boolean_t dsc_sent_end;
} dmu_send_cookie_t;
static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
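/*
 * Release a send_range: free the copied dnode for OBJECT ranges, wait for
 * any outstanding I/O and free the data buffers for DATA ranges, then
 * free the range itself.
 */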
static void
range_free(struct send_range *range)
{
if (range->type == OBJECT) {
size_t size = sizeof (dnode_phys_t) *
(range->sru.object.dnp->dn_extra_slots + 1);
kmem_free(range->sru.object.dnp, size);
} else if (range->type == DATA) {
mutex_enter(&range->sru.data.lock);
while (range->sru.data.io_outstanding)
cv_wait(&range->sru.data.cv, &range->sru.data.lock);
if (range->sru.data.abd != NULL)
abd_free(range->sru.data.abd);
if (range->sru.data.abuf != NULL) {
arc_buf_destroy(range->sru.data.abuf,
&range->sru.data.abuf);
}
mutex_exit(&range->sru.data.lock);
cv_destroy(&range->sru.data.cv);
mutex_destroy(&range->sru.data.lock);
}
kmem_free(range, sizeof (*range));
}
/*
* For all record types except BEGIN, fill in the checksum (overlaid in
* drr_u.drr_checksum.drr_checksum). The checksum verifies everything
* up to the start of the checksum itself.
*/
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
dmu_send_outparams_t *dso = dscp->dsc_dso;
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
(void) fletcher_4_incremental_native(dscp->dsc_drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&dscp->dsc_zc);
if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
dscp->dsc_sent_begin = B_TRUE;
} else {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
drr_checksum.drr_checksum));
dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
}
if (dscp->dsc_drr->drr_type == DRR_END) {
dscp->dsc_sent_end = B_TRUE;
}
(void) fletcher_4_incremental_native(&dscp->dsc_drr->
drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), &dscp->dsc_zc);
*dscp->dsc_off += sizeof (dmu_replay_record_t);
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
sizeof (dmu_replay_record_t), dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
if (payload_len != 0) {
*dscp->dsc_off += payload_len;
/*
* payload is null when dso_dryrun == B_TRUE (i.e. when we're
* doing a send size calculation)
*/
if (payload != NULL) {
(void) fletcher_4_incremental_native(
payload, payload_len, &dscp->dsc_zc);
}
/*
* The code does not rely on this (len being a multiple of 8).
* We keep this assertion because of the corresponding assertion
* in receive_read(). Keeping this assertion ensures that we do
* not inadvertently break backwards compatibility (causing the
* assertion in receive_read() to trigger on old software).
*
* Raw sends cannot be received on old software, and so can
* bypass this assertion.
*/
ASSERT((payload_len % 8 == 0) ||
(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
payload_len, dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
}
return (0);
}
/*
* Fill in the drr_free struct, or perform aggregation if the previous record is
* also a free record, and the two are adjacent.
*
* Note that we send free records even for a full send, because we want to be
* able to receive a full send as a clone, which requires a list of all the free
* and freeobject records that were generated on the source.
*/
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
/*
* When we receive a free record, dbuf_free_range() assumes
* that the receiving system doesn't have any dbufs in the range
* being freed. This is always true because there is a one-record
* constraint: we only send one WRITE record for any given
* object,offset. We know that the one-record constraint is
* true because we always send data in increasing order by
* object,offset.
*
* If the increasing-order constraint ever changes, we should find
* another way to assert that the one-record constraint is still
* satisfied.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
* aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREE) {
/*
* Check to see whether this free block can be aggregated
* with pending one.
*/
if (drrf->drr_object == object && drrf->drr_offset +
drrf->drr_length == offset) {
if (offset + length < offset || length == UINT64_MAX)
drrf->drr_length = UINT64_MAX;
else
drrf->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
if (offset + length < offset)
drrf->drr_length = DMU_OBJECT_END;
else
drrf->drr_length = length;
drrf->drr_toguid = dscp->dsc_toguid;
if (length == DMU_OBJECT_END) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
} else {
dscp->dsc_pending_op = PENDING_FREE;
}
return (0);
}
/*
* Fill in the drr_redact struct, or perform aggregation if the previous record
* is also a redaction record, and the two are adjacent.
*/
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
/*
* If there is a pending op, but it's not PENDING_REDACT, push it out,
 * since aggregation can only be done for records of the
* same type (i.e., DRR_REDACT records can only be aggregated with
* other DRR_REDACT records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_REDACT) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_REDACT) {
/*
* Check to see whether this redacted block can be aggregated
* with pending one.
*/
if (drrr->drr_object == object && drrr->drr_offset +
drrr->drr_length == offset) {
drrr->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a REDACT record and make it pending */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
drrr->drr_length = length;
drrr->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_REDACT;
return (0);
}
static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
uint64_t offset, int lsize, int psize, const blkptr_t *bp,
boolean_t io_compressed, void *data)
{
uint64_t payload_size;
boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
/*
* We send data in increasing object, offset order.
* See comment in dump_free() for details.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
dscp->dsc_last_data_object = object;
dscp->dsc_last_data_offset = offset + lsize - 1;
/*
* If there is any kind of pending aggregation (currently either
* a grouping of free objects or free blocks), push it out to
* the stream, since aggregation can't be done across operations
* of different types.
*/
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_logical_size = lsize;
/* only set the compression fields if the buf is compressed or raw */
boolean_t compressed =
(bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
io_compressed : lsize != psize);
if (raw || compressed) {
ASSERT(raw || dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3S(psize, >, 0);
if (raw) {
ASSERT(BP_IS_PROTECTED(bp));
/*
* This is a raw protected block so we need to pass
* along everything the receiving side will need to
* interpret this block, including the byteswap, salt,
* IV, and MAC.
*/
if (BP_SHOULD_BYTESWAP(bp))
drrw->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drrw->drr_salt,
drrw->drr_iv);
zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
} else {
/* this is a compressed block */
ASSERT(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_SHOULD_BYTESWAP(bp));
ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
ASSERT3S(lsize, >=, psize);
}
/* set fields common to compressed and raw sends */
drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
drrw->drr_compressed_size = psize;
payload_size = drrw->drr_compressed_size;
} else {
payload_size = drrw->drr_logical_size;
}
if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
/*
* There's no pre-computed checksum for partial-block writes,
* embedded BP's, or encrypted BP's that are being sent as
* plaintext, so (like fletcher4-checksummed blocks) userland
* will have to compute a dedup-capable checksum itself.
*/
drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
} else {
drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
ZCHECKSUM_FLAG_DEDUP)
drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
drrw->drr_key.ddk_cksum = bp->blk_cksum;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
int blksz, const blkptr_t *bp)
{
char buf[BPE_PAYLOAD_SIZE];
struct drr_write_embedded *drrw =
&(dscp->dsc_drr->drr_u.drr_write_embedded);
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
ASSERT(BP_IS_EMBEDDED(bp));
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_compression = BP_GET_COMPRESS(bp);
drrw->drr_etype = BPE_GET_ETYPE(bp);
drrw->drr_lsize = BPE_GET_LSIZE(bp);
drrw->drr_psize = BPE_GET_PSIZE(bp);
decode_embedded_bp_compressed(bp, buf);
if (dump_record(dscp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
void *data)
{
struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
uint64_t blksz = BP_GET_LSIZE(bp);
uint64_t payload_size = blksz;
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a SPILL record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
drrs->drr_toguid = dscp->dsc_toguid;
/* See comment in dump_dnode() for full details */
if (zfs_send_unmodified_spill_blocks &&
(bp->blk_birth <= dscp->dsc_fromtxg)) {
drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
}
/* handle raw send fields */
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drrs->drr_flags |= DRR_RAW_BYTESWAP;
drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
drrs->drr_compressed_size = BP_GET_PSIZE(bp);
zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
payload_size = drrs->drr_compressed_size;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
uint64_t maxobj = DNODES_PER_BLOCK *
(DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
/*
* ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
 * leading to zfs recv never completing. To avoid this issue, don't
* send FREEOBJECTS records for object IDs which cannot exist on the
* receiving side.
*/
if (maxobj > 0) {
if (maxobj <= firstobj)
return (0);
if (maxobj < firstobj + numobjs)
numobjs = maxobj - firstobj;
}
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
* can only be aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with pending one
*/
if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
drrfo->drr_numobjs += numobjs;
return (0);
} else {
/* can't be aggregated. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
drrfo->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_FREEOBJECTS;
return (0);
}
static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
int bonuslen;
if (object < dscp->dsc_resume_object) {
/*
* Note: when resuming, we will visit all the dnodes in
* the block of dnodes that we are resuming from. In
* this case it's unnecessary to send the dnodes prior to
* the one we are resuming from. We should be at most one
* block's worth of dnodes behind the resume point.
*/
ASSERT3U(dscp->dsc_resume_object - object, <,
1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
return (0);
}
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
return (dump_freeobjects(dscp, object, 1));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
drro->drr_bonuslen = dnp->dn_bonuslen;
drro->drr_dn_slots = dnp->dn_extra_slots + 1;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
drro->drr_toguid = dscp->dsc_toguid;
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drro->drr_flags |= DRR_RAW_BYTESWAP;
/* needed for reconstructing dnp on recv side */
drro->drr_maxblkid = dnp->dn_maxblkid;
drro->drr_indblkshift = dnp->dn_indblkshift;
drro->drr_nlevels = dnp->dn_nlevels;
drro->drr_nblkptr = dnp->dn_nblkptr;
/*
* Since we encrypt the entire bonus area, the (raw) part
* beyond the bonuslen is actually nonzero, so we need
* to send it.
*/
if (bonuslen != 0) {
+ if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
+ return (SET_ERROR(EINVAL));
drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
bonuslen = drro->drr_raw_bonuslen;
}
}
/*
* DRR_OBJECT_SPILL is set for every dnode which references a
* spill block. This allows the receiving pool to definitively
* determine when a spill block should be kept or freed.
*/
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
drro->drr_flags |= DRR_OBJECT_SPILL;
if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
return (SET_ERROR(EINTR));
/* Free anything past the end of the file. */
if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
return (SET_ERROR(EINTR));
/*
* Send DRR_SPILL records for unmodified spill blocks. This is useful
* because changing certain attributes of the object (e.g. blocksize)
* can cause old versions of ZFS to incorrectly remove a spill block.
* Including these records in the stream forces an up to date version
 * to always be written, ensuring they're never lost. Current versions
* of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
* ignore these unmodified spill blocks.
*/
if (zfs_send_unmodified_spill_blocks &&
(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
(DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
bzero(&record, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
record.start_blkid = DMU_SPILL_BLKID;
record.end_blkid = record.start_blkid + 1;
record.sru.data.bp = *bp;
record.sru.data.obj_type = dnp->dn_type;
record.sru.data.datablksz = BP_GET_LSIZE(bp);
if (do_dump(dscp, &record) != 0)
return (SET_ERROR(EINTR));
}
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
uint64_t firstobj, uint64_t numslots)
{
struct drr_object_range *drror =
&(dscp->dsc_drr->drr_u.drr_object_range);
/* we only use this record type for raw sends */
ASSERT(BP_IS_PROTECTED(bp));
ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
ASSERT0(BP_GET_LEVEL(bp));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
drror->drr_toguid = dscp->dsc_toguid;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
zio_crypt_decode_mac_bp(bp, drror->drr_mac);
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
if (!BP_IS_EMBEDDED(bp))
return (B_FALSE);
/*
* Compression function must be legacy, or explicitly enabled.
*/
if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
!(featureflags & DMU_BACKUP_FEATURE_LZ4)))
return (B_FALSE);
/*
* If we have not set the ZSTD feature flag, we can't send ZSTD
* compressed embedded blocks, as the receiver may not support them.
*/
if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
!(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
return (B_FALSE);
/*
* Embed type must be explicitly enabled.
*/
switch (BPE_GET_ETYPE(bp)) {
case BP_EMBEDDED_TYPE_DATA:
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (B_TRUE);
break;
default:
return (B_FALSE);
}
return (B_FALSE);
}
/*
* This function actually handles figuring out what kind of record needs to be
* dumped, and calling the appropriate helper function. In most cases,
* the data has already been read by send_reader_thread().
*/
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
int err = 0;
switch (range->type) {
case OBJECT:
err = dump_dnode(dscp, &range->sru.object.bp, range->object,
range->sru.object.dnp);
return (err);
case OBJECT_RANGE: {
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
return (0);
}
uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
DNODE_SHIFT;
uint64_t firstobj = range->start_blkid * epb;
err = dump_object_range(dscp, &range->sru.object_range.bp,
firstobj, epb);
break;
}
case REDACT: {
struct srr *srrp = &range->sru.redact;
err = dump_redact(dscp, range->object, range->start_blkid *
srrp->datablksz, (range->end_blkid - range->start_blkid) *
srrp->datablksz);
return (err);
}
case DATA: {
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
spa_t *spa =
dmu_objset_spa(dscp->dsc_os);
ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (BP_GET_TYPE(bp) == DMU_OT_SA) {
arc_flags_t aflags = ARC_FLAG_WAIT;
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
zioflags |= ZIO_FLAG_RAW;
}
zbookmark_phys_t zb;
ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
zb.zb_objset = dmu_objset_id(dscp->dsc_os);
zb.zb_object = range->object;
zb.zb_level = 0;
zb.zb_blkid = range->start_blkid;
arc_buf_t *abuf = NULL;
if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb) != 0)
return (SET_ERROR(EIO));
err = dump_spill(dscp, bp, zb.zb_object,
(abuf == NULL ? NULL : abuf->b_data));
if (abuf != NULL)
arc_buf_destroy(abuf, &abuf);
return (err);
}
if (send_do_embed(bp, dscp->dsc_featureflags)) {
err = dump_write_embedded(dscp, range->object,
range->start_blkid * srdp->datablksz,
srdp->datablksz, bp);
return (err);
}
ASSERT(range->object > dscp->dsc_resume_object ||
(range->object == dscp->dsc_resume_object &&
range->start_blkid * srdp->datablksz >=
dscp->dsc_resume_offset));
/* it's a level-0 block of a regular object */
mutex_enter(&srdp->lock);
while (srdp->io_outstanding)
cv_wait(&srdp->cv, &srdp->lock);
err = srdp->io_err;
mutex_exit(&srdp->lock);
if (err != 0) {
if (zfs_send_corrupt_data &&
!dscp->dsc_dso->dso_dryrun) {
/*
* Send a block filled with 0x"zfs badd bloc"
*/
srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
ARC_BUFC_DATA, srdp->datablksz);
uint64_t *ptr;
for (ptr = srdp->abuf->b_data;
(char *)ptr < (char *)srdp->abuf->b_data +
srdp->datablksz; ptr++)
*ptr = 0x2f5baddb10cULL;
} else {
return (SET_ERROR(EIO));
}
}
ASSERT(dscp->dsc_dso->dso_dryrun ||
srdp->abuf != NULL || srdp->abd != NULL);
uint64_t offset = range->start_blkid * srdp->datablksz;
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
ASSERT3P(srdp->abuf, ==, NULL);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
/*
* If we have large blocks stored on disk but the send flags
* don't allow us to send large blocks, we split the data from
* the arc buf into chunks.
*/
if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
while (srdp->datablksz > 0 && err == 0) {
int n = MIN(srdp->datablksz,
SPA_OLD_MAXBLOCKSIZE);
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset, n, n, NULL, B_FALSE,
data);
offset += n;
/*
* When doing dry run, data==NULL is used as a
* sentinel value by
* dmu_dump_write()->dump_record().
*/
if (data != NULL)
data += n;
srdp->datablksz -= n;
}
} else {
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset,
srdp->datablksz, srdp->datasz, bp,
srdp->io_compressed, data);
}
return (err);
}
case HOLE: {
struct srh *srhp = &range->sru.hole;
if (range->object == DMU_META_DNODE_OBJECT) {
uint32_t span = srhp->datablksz >> DNODE_SHIFT;
uint64_t first_obj = range->start_blkid * span;
uint64_t numobj = range->end_blkid * span - first_obj;
return (dump_freeobjects(dscp, first_obj, numobj));
}
uint64_t offset = 0;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it can never not be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(range->start_blkid, srhp->datablksz,
&offset)) {
return (0);
}
uint64_t len = 0;
if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
len = UINT64_MAX;
len = len - offset;
return (dump_free(dscp, range->object, offset, len));
}
default:
panic("Invalid range type in do_dump: %d", range->type);
}
return (err);
}
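/*
 * Allocate and initialize a send_range of the given type. DATA ranges also
 * have their lock, condition variable, and I/O tracking fields initialized.
 */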
static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
uint64_t end_blkid, boolean_t eos)
{
struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
range->type = type;
range->object = object;
range->start_blkid = start_blkid;
range->end_blkid = end_blkid;
range->eos_marker = eos;
if (type == DATA) {
range->sru.data.abd = NULL;
range->sru.data.abuf = NULL;
mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
range->sru.data.io_outstanding = 0;
range->sru.data.io_err = 0;
range->sru.data.io_compressed = B_FALSE;
}
return (range);
}
/*
* This is the callback function to traverse_dataset that acts as a worker
* thread for dmu_send_impl.
*/
-/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
+ (void) zilog;
struct send_thread_arg *sta = arg;
struct send_range *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= sta->resume.zb_object);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (sta->os->os_encrypted &&
!BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
spa_log_error(spa, zb);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(sta->os));
return (SET_ERROR(EIO));
}
if (sta->cancel)
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object))
return (0);
atomic_inc_64(sta->num_blocks_visited);
if (zb->zb_level == ZB_DNODE_LEVEL) {
if (zb->zb_object == DMU_META_DNODE_OBJECT)
return (0);
record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
bcopy(dnp, record->sru.object.dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
!BP_IS_HOLE(bp)) {
record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
zb->zb_blkid + 1, B_FALSE);
record->sru.object_range.bp = *bp;
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
return (0);
if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
return (0);
uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
uint64_t start;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it can never not be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
span * zb->zb_blkid > dnp->dn_maxblkid)) {
ASSERT(BP_IS_HOLE(bp));
return (0);
}
if (zb->zb_blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
enum type record_type = DATA;
if (BP_IS_HOLE(bp))
record_type = HOLE;
else if (BP_IS_REDACTED(bp))
record_type = REDACT;
else
record_type = DATA;
record = range_alloc(record_type, zb->zb_object, start,
(start + span < start ? 0 : start + span), B_FALSE);
uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
if (BP_IS_HOLE(bp)) {
record->sru.hole.datablksz = datablksz;
} else if (BP_IS_REDACTED(bp)) {
record->sru.redact.datablksz = datablksz;
} else {
record->sru.data.datablksz = datablksz;
record->sru.data.obj_type = dnp->dn_type;
record->sru.data.bp = *bp;
}
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
struct redact_list_cb_arg {
uint64_t *num_blocks_visited;
bqueue_t *q;
boolean_t *cancel;
boolean_t mark_redact;
};
static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
struct redact_list_cb_arg *rlcap = arg;
atomic_inc_64(rlcap->num_blocks_visited);
if (*rlcap->cancel)
return (-1);
struct send_range *data = range_alloc(REDACT, rb->rbp_object,
rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
if (rlcap->mark_redact) {
data->type = REDACT;
data->sru.redact.datablksz = redact_block_get_size(rb);
} else {
data->type = PREVIOUSLY_REDACTED;
}
bqueue_enqueue(rlcap->q, data, sizeof (*data));
return (0);
}
/*
* This function kicks off the traverse_dataset. It also handles setting the
* error code of the thread in case something goes wrong, and pushes the End of
* Stream record when the traverse_dataset call has finished.
*/
static void
send_traverse_thread(void *arg)
{
struct send_thread_arg *st_arg = arg;
int err = 0;
struct send_range *data;
fstrans_cookie_t cookie = spl_fstrans_mark();
err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
st_arg->fromtxg, &st_arg->resume,
st_arg->flags, send_cb, st_arg);
if (err != EINTR)
st_arg->error_code = err;
data = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
 * Utility function that causes End of Stream records to compare after all
* others, so that other threads' comparison logic can stay simple.
*/
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
if (from->eos_marker == B_TRUE)
return (1);
if (to->eos_marker == B_TRUE)
return (-1);
uint64_t from_obj = from->object;
uint64_t from_end_obj = from->object + 1;
uint64_t to_obj = to->object;
uint64_t to_end_obj = to->object + 1;
if (from_obj == 0) {
ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (to_obj == 0) {
ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (from_end_obj <= to_obj)
return (-1);
if (from_obj >= to_end_obj)
return (1);
int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
if (unlikely(cmp))
return (cmp);
if (from->end_blkid <= to->start_blkid)
return (-1);
if (from->start_blkid >= to->end_blkid)
return (1);
return (0);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, but do not free the old data. This is used so that the
* records can be sent on to the main thread without copying the data.
*/
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = bqueue_dequeue(bq);
ASSERT3S(send_range_after(prev, next), ==, -1);
return (next);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, and free the old data.
*/
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = get_next_range_nofree(bq, prev);
range_free(prev);
return (next);
}
static void
redact_list_thread(void *arg)
{
struct redact_list_thread_arg *rlt_arg = arg;
struct send_range *record;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (rlt_arg->rl != NULL) {
struct redact_list_cb_arg rlcba = {0};
rlcba.cancel = &rlt_arg->cancel;
rlcba.q = &rlt_arg->q;
rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
rlcba.mark_redact = rlt_arg->mark_redact;
int err = dsl_redaction_list_traverse(rlt_arg->rl,
&rlt_arg->resume, redact_list_cb, &rlcba);
if (err != EINTR)
rlt_arg->error_code = err;
}
record = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Compare the start point of the two provided ranges. End of stream ranges
 * compare last, and objects compare before any data or hole inside that
 * object, as well as before multi-object holes that start at the same object.
*/
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
uint64_t r1_objequiv = r1->object;
uint64_t r1_l0equiv = r1->start_blkid;
uint64_t r2_objequiv = r2->object;
uint64_t r2_l0equiv = r2->start_blkid;
int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
if (unlikely(cmp))
return (cmp);
if (r1->object == 0) {
r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
r1_l0equiv = 0;
}
if (r2->object == 0) {
r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
r2_l0equiv = 0;
}
cmp = TREE_CMP(r1_objequiv, r2_objequiv);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
if (unlikely(cmp))
return (cmp);
return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}
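/*
 * Queue indices for the merge thread, in descending priority order: ranges
 * from the redaction list take precedence over ranges from the to_ds
 * traversal, which take precedence over ranges from the from_ds thread.
 */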
enum q_idx {
REDACT_IDX = 0,
TO_IDX,
FROM_IDX,
NUM_THREADS
};
/*
* This function returns the next range the send_merge_thread should operate on.
* The inputs are two arrays; the first one stores the range at the front of the
* queues stored in the second one. The ranges are sorted in descending
* priority order; the metadata from earlier ranges overrules metadata from
* later ranges. out_mask is used to return which threads the ranges came from;
* bit i is set if ranges[i] started at the same place as the returned range.
*
* This code is not hardcoded to compare a specific number of threads; it could
* be used with any number, just by changing the q_idx enum.
*
* The "next range" is the one with the earliest start; if two starts are equal,
* the highest-priority range is the next to operate on. If a higher-priority
* range starts in the middle of the first range, then the first range will be
* truncated to end where the higher-priority range starts, and we will operate
* on that one next time. In this way, we make sure that each block covered by
* some range gets covered by a returned range, and each block covered is
* returned using the metadata of the highest-priority range it appears in.
*
* For example, if the three ranges at the front of the queues were [2,4),
* [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
* from the third range, [2,4) with the metadata from the first range, and then
* [4,5) with the metadata from the second.
*/
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
int idx = 0; /* index of the range with the earliest start */
int i;
uint64_t bmask = 0;
for (i = 1; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
idx = i;
}
if (ranges[idx]->eos_marker) {
struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
*out_mask = 0;
return (ret);
}
/*
* Find all the ranges that start at that same point.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
bmask |= 1 << i;
}
*out_mask = bmask;
/*
* OBJECT_RANGE records only come from the TO thread, and should always
* be treated as overlapping with nothing and sent on immediately. They
* are only used in raw sends, and are never redacted.
*/
if (ranges[idx]->type == OBJECT_RANGE) {
ASSERT3U(idx, ==, TO_IDX);
ASSERT3U(*out_mask, ==, 1 << TO_IDX);
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Find the first start or end point after the start of the first range.
*/
uint64_t first_change = ranges[idx]->end_blkid;
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || ranges[i]->eos_marker ||
ranges[i]->object > ranges[idx]->object ||
ranges[i]->object == DMU_META_DNODE_OBJECT)
continue;
ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
if (first_change > ranges[i]->start_blkid &&
(bmask & (1 << i)) == 0)
first_change = ranges[i]->start_blkid;
else if (first_change > ranges[i]->end_blkid)
first_change = ranges[i]->end_blkid;
}
/*
* Update all ranges to no longer overlap with the range we're
* returning. All such ranges must start at the same place as the range
* being returned, and end at or after first_change. Thus we update
* their start to first_change. If that makes them size 0, then free
* them and pull a new range from that thread.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || (bmask & (1 << i)) == 0)
continue;
ASSERT3U(first_change, >, ranges[i]->start_blkid);
ranges[i]->start_blkid = first_change;
ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
if (ranges[i]->start_blkid == ranges[i]->end_blkid)
ranges[i] = get_next_range(qs[i], ranges[i]);
}
/*
* Short-circuit the simple case; if the range doesn't overlap with
* anything else, or it only overlaps with things that start at the same
* place and are longer, send it on.
*/
if (first_change == ranges[idx]->end_blkid) {
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Otherwise, return a truncated copy of ranges[idx] and move the start
* of ranges[idx] back to first_change.
*/
struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
*ret = *ranges[idx];
ret->end_blkid = first_change;
ranges[idx]->start_blkid = first_change;
return (ret);
}
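/*
 * Tracing the first step of the example above: [1,3) has the earliest
 * start, so idx points at it; first_change starts at 3 and is lowered to
 * 2, the start of [2,4), so a truncated copy covering [1,2) is returned
 * and the remaining [2,3) stays at the front of that queue for the next
 * call.
 */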
#define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
/*
* Merge the results from the from thread and the to thread, and then hand the
* records off to send_prefetch_thread to prefetch them. If this is not a
* send from a redaction bookmark, the from thread will push an end of stream
* record and stop, and we'll just send everything that was changed in the
* to_ds since the ancestor's creation txg. If it is, then since
* traverse_dataset has a canonical order, we can compare the changes as
* they're pulled off the queues. That will give us a stream that is
* appropriately sorted, and covers all records. In addition, we pull the
* data from the redact_list_thread and use that to determine which blocks
* should be redacted.
*/
static void
send_merge_thread(void *arg)
{
struct send_merge_thread_arg *smt_arg = arg;
struct send_range *front_ranges[NUM_THREADS];
bqueue_t *queues[NUM_THREADS];
int err = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (smt_arg->redact_arg == NULL) {
front_ranges[REDACT_IDX] =
kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
front_ranges[REDACT_IDX]->type = REDACT;
queues[REDACT_IDX] = NULL;
} else {
front_ranges[REDACT_IDX] =
bqueue_dequeue(&smt_arg->redact_arg->q);
queues[REDACT_IDX] = &smt_arg->redact_arg->q;
}
front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
queues[TO_IDX] = &smt_arg->to_arg->q;
front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
queues[FROM_IDX] = &smt_arg->from_arg->q;
uint64_t mask = 0;
struct send_range *range;
for (range = find_next_range(front_ranges, queues, &mask);
!range->eos_marker && err == 0 && !smt_arg->cancel;
range = find_next_range(front_ranges, queues, &mask)) {
/*
* If the range in question was in both the from redact bookmark
* and the bookmark we're using to redact, then don't send it.
* It's already redacted on the receiving system, so a redaction
* record would be redundant.
*/
if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
ASSERT3U(range->type, ==, REDACT);
range_free(range);
continue;
}
bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
if (smt_arg->to_arg->error_code != 0) {
err = smt_arg->to_arg->error_code;
} else if (smt_arg->from_arg->error_code != 0) {
err = smt_arg->from_arg->error_code;
} else if (smt_arg->redact_arg != NULL &&
smt_arg->redact_arg->error_code != 0) {
err = smt_arg->redact_arg->error_code;
}
}
if (smt_arg->cancel && err == 0)
err = SET_ERROR(EINTR);
smt_arg->error = err;
if (smt_arg->error != 0) {
smt_arg->to_arg->cancel = B_TRUE;
smt_arg->from_arg->cancel = B_TRUE;
if (smt_arg->redact_arg != NULL)
smt_arg->redact_arg->cancel = B_TRUE;
}
for (int i = 0; i < NUM_THREADS; i++) {
while (!front_ranges[i]->eos_marker) {
front_ranges[i] = get_next_range(queues[i],
front_ranges[i]);
}
range_free(front_ranges[i]);
}
if (range == NULL)
range = kmem_zalloc(sizeof (*range), KM_SLEEP);
range->eos_marker = B_TRUE;
bqueue_enqueue_flush(&smt_arg->q, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
struct send_reader_thread_arg {
struct send_merge_thread_arg *smta;
bqueue_t q;
boolean_t cancel;
boolean_t issue_reads;
uint64_t featureflags;
int error;
};
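/*
 * Completion callback for the asynchronous zio_read() issued by
 * issue_data_read() below: on error the data abd is freed and io_err
 * records the failure; in all cases io_outstanding is cleared and any
 * waiter on sru.data.cv is woken.
 */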
static void
dmu_send_read_done(zio_t *zio)
{
struct send_range *range = zio->io_private;
mutex_enter(&range->sru.data.lock);
if (zio->io_error != 0) {
abd_free(range->sru.data.abd);
range->sru.data.abd = NULL;
range->sru.data.io_err = zio->io_error;
}
ASSERT(range->sru.data.io_outstanding);
range->sru.data.io_outstanding = B_FALSE;
cv_broadcast(&range->sru.data.cv);
mutex_exit(&range->sru.data.lock);
}
static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
objset_t *os = srta->smta->os;
ASSERT3U(range->type, ==, DATA);
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
/*
* If we have large blocks stored on disk but
* the send flags don't allow us to send large
* blocks, we split the data from the arc buf
* into chunks.
*/
boolean_t split_large_blocks =
srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
/*
* We should only request compressed data from the ARC if all
* the following are true:
* - stream compression was requested
* - we aren't splitting large blocks into smaller chunks
* - the data won't need to be byteswapped before sending
* - this isn't an embedded block
* - this isn't metadata (if receiving on a different endian
* system it can be byteswapped more easily)
*/
boolean_t request_compressed =
(srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
!split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
!BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
zioflags |= ZIO_FLAG_RAW;
srdp->io_compressed = B_TRUE;
} else if (request_compressed) {
zioflags |= ZIO_FLAG_RAW_COMPRESS;
srdp->io_compressed = B_TRUE;
}
srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
if (!srta->issue_reads)
return;
if (BP_IS_REDACTED(bp))
return;
if (send_do_embed(bp, srta->featureflags))
return;
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(os),
.zb_object = range->object,
.zb_level = 0,
.zb_blkid = range->start_blkid,
};
arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
int arc_err = arc_read(NULL, os->os_spa, bp,
arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb);
/*
* If the data is not already cached in the ARC, we read directly
* from zio. This avoids the performance overhead of adding a new
* entry to the ARC, and we also avoid polluting the ARC cache with
* data that is not likely to be used in the future.
*/
if (arc_err != 0) {
srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
srdp->io_outstanding = B_TRUE;
zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
srdp->datasz, dmu_send_read_done, range,
ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
}
}
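/*
 * For illustration (hypothetical sizes): with a compressed send
 * (DMU_BACKUP_FEATURE_COMPRESSED) of a 128K block stored with a 40K
 * psize, and none of the exclusions above applying, ZIO_FLAG_RAW_COMPRESS
 * is set and datasz is the 40K psize; a plain send of the same block
 * reads and sends the full 128K lsize.
 */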
/*
* Create a new record with the given values.
*/
static void
enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
(BP_IS_REDACTED(bp) ? REDACT : DATA));
struct send_range *range = range_alloc(range_type, dn->dn_object,
blkid, blkid + count, B_FALSE);
if (blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
switch (range_type) {
case HOLE:
range->sru.hole.datablksz = datablksz;
break;
case DATA:
ASSERT3U(count, ==, 1);
range->sru.data.datablksz = datablksz;
range->sru.data.obj_type = dn->dn_type;
range->sru.data.bp = *bp;
issue_data_read(srta, range);
break;
case REDACT:
range->sru.redact.datablksz = datablksz;
break;
default:
break;
}
bqueue_enqueue(q, range, datablksz);
}
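/*
 * For example (hypothetical values): a hole covering blocks [10, 30) of
 * an object with 128K blocks becomes a single HOLE range with
 * start_blkid 10, end_blkid 30 and sru.hole.datablksz of 131072, while a
 * DATA range always covers exactly one block and also has its read
 * issued via issue_data_read() above.
 */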
/*
* This thread is responsible for two things: First, it retrieves the correct
* blkptr in the to ds if we need to send the data because of something from
* the from thread. As a result of this, we're the first ones to discover that
* some indirect blocks can be discarded because they're not holes. Second,
* it issues prefetches for the data we need to send.
*/
static void
send_reader_thread(void *arg)
{
struct send_reader_thread_arg *srta = arg;
struct send_merge_thread_arg *smta = srta->smta;
bqueue_t *inq = &smta->q;
bqueue_t *outq = &srta->q;
objset_t *os = smta->os;
fstrans_cookie_t cookie = spl_fstrans_mark();
struct send_range *range = bqueue_dequeue(inq);
int err = 0;
/*
* If the record we're analyzing is from a redaction bookmark from the
* fromds, then we need to know whether or not it exists in the tods so
* we know whether to create records for it or not. If it does, we need
* the datablksz so we can generate an appropriate record for it.
* Finally, if it isn't redacted, we need the blkptr so that we can send
* a WRITE record containing the actual data.
*/
uint64_t last_obj = UINT64_MAX;
uint64_t last_obj_exists = B_TRUE;
while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
err == 0) {
switch (range->type) {
case DATA:
issue_data_read(srta, range);
bqueue_enqueue(outq, range, range->sru.data.datablksz);
range = get_next_range_nofree(inq, range);
break;
case HOLE:
case OBJECT:
case OBJECT_RANGE:
case REDACT: /* Redacted blocks must exist */
bqueue_enqueue(outq, range, sizeof (*range));
range = get_next_range_nofree(inq, range);
break;
case PREVIOUSLY_REDACTED: {
/*
* This entry came from the "from bookmark" when
* sending from a bookmark that has a redaction
* list. We need to check if this object/blkid
* exists in the target ("to") dataset, and if
* not then we drop this entry. We also need
* to fill in the block pointer so that we know
* what to prefetch.
*
* To accomplish the above, we first cache whether or
* not the last object we examined exists. If it
* doesn't, we can drop this record. If it does, we hold
* the dnode and use it to call dbuf_dnode_findbp. We do
* this instead of dbuf_bookmark_findbp because we will
* often operate on large ranges, and holding the dnode
* once is more efficient.
*/
boolean_t object_exists = B_TRUE;
/*
* If the data is redacted, we only care if it exists,
* so that we don't send records for objects that have
* been deleted.
*/
dnode_t *dn;
if (range->object == last_obj && !last_obj_exists) {
/*
* If we're still examining the same object as
* previously, and it doesn't exist, we don't
* need to call dbuf_bookmark_findbp.
*/
object_exists = B_FALSE;
} else {
err = dnode_hold(os, range->object, FTAG, &dn);
if (err == ENOENT) {
object_exists = B_FALSE;
err = 0;
}
last_obj = range->object;
last_obj_exists = object_exists;
}
if (err != 0) {
break;
} else if (!object_exists) {
/*
* The block was modified, but doesn't
* exist in the to dataset; if it was
* deleted in the to dataset, then we'll
* visit the hole bp for it at some point.
*/
range = get_next_range(inq, range);
continue;
}
uint64_t file_max =
(dn->dn_maxblkid < range->end_blkid ?
dn->dn_maxblkid : range->end_blkid);
/*
* The object exists, so we need to try to find the
* blkptr for each block in the range we're processing.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
for (uint64_t blkid = range->start_blkid;
blkid < file_max; blkid++) {
blkptr_t bp;
uint32_t datablksz =
dn->dn_phys->dn_datablkszsec <<
SPA_MINBLOCKSHIFT;
uint64_t offset = blkid * datablksz;
/*
* This call finds the next non-hole block in
* the object. This is to prevent a
* performance problem where we're unredacting
* a large hole. Using dnode_next_offset to
* skip over the large hole avoids iterating
* over every block in it.
*/
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&offset, 1, 1, 0);
if (err == ESRCH) {
offset = UINT64_MAX;
err = 0;
} else if (err != 0) {
break;
}
if (offset != blkid * datablksz) {
/*
* if there is a hole from here
* (blkid) to offset
*/
offset = MIN(offset, file_max *
datablksz);
uint64_t nblks = (offset / datablksz) -
blkid;
enqueue_range(srta, outq, dn, blkid,
nblks, NULL, datablksz);
blkid += nblks;
}
if (blkid >= file_max)
break;
err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
NULL, NULL);
if (err != 0)
break;
ASSERT(!BP_IS_HOLE(&bp));
enqueue_range(srta, outq, dn, blkid, 1, &bp,
datablksz);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
range = get_next_range(inq, range);
}
}
}
if (srta->cancel || err != 0) {
smta->cancel = B_TRUE;
srta->error = err;
} else if (smta->error != 0) {
srta->error = smta->error;
}
while (!range->eos_marker)
range = get_next_range(inq, range);
bqueue_enqueue_flush(outq, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
#define NUM_SNAPS_NOT_REDACTED UINT64_MAX
struct dmu_send_params {
/* Pool args */
void *tag; /* Tag that dp was held with, will be used to release dp. */
dsl_pool_t *dp;
/* To snapshot args */
const char *tosnap;
dsl_dataset_t *to_ds;
/* From snapshot args */
zfs_bookmark_phys_t ancestor_zb;
uint64_t *fromredactsnaps;
/* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
uint64_t numfromredactsnaps;
/* Stream params */
boolean_t is_clone;
boolean_t embedok;
boolean_t large_block_ok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t resumeobj;
uint64_t resumeoff;
uint64_t saved_guid;
zfs_bookmark_phys_t *redactbook;
/* Stream output params */
dmu_send_outparams_t *dso;
/* Stream progress params */
offset_t *off;
int outfd;
char saved_toname[MAXNAMELEN];
};
static int
setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
uint64_t *featureflags)
{
dsl_dataset_t *to_ds = dspp->to_ds;
dsl_pool_t *dp = dspp->dp;
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS) {
uint64_t version;
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
return (SET_ERROR(EINVAL));
if (version >= ZPL_VERSION_SA)
*featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
#endif
/* raw sends imply large_block_ok */
if ((dspp->rawok || dspp->large_block_ok) &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
}
/* encrypted datasets will not have embedded blocks */
if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
*featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
}
/* raw send implies compressok */
if (dspp->compressok || dspp->rawok)
*featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
if (dspp->rawok && os->os_encrypted)
*featureflags |= DMU_BACKUP_FEATURE_RAW;
if ((*featureflags &
(DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
DMU_BACKUP_FEATURE_RAW)) != 0 &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_LZ4;
}
/*
* We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
* allow sending ZSTD compressed datasets to a receiver that does not
* support ZSTD
*/
if ((*featureflags &
(DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_ZSTD;
}
if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
*featureflags |= DMU_BACKUP_FEATURE_RESUMING;
}
if (dspp->redactbook != NULL) {
*featureflags |= DMU_BACKUP_FEATURE_REDACTED;
}
if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
}
return (0);
}
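/*
 * For illustration (hypothetical configuration): a raw send of an
 * encrypted dataset with large blocks active ends up with
 * DMU_BACKUP_FEATURE_RAW, DMU_BACKUP_FEATURE_LARGE_BLOCKS and
 * DMU_BACKUP_FEATURE_COMPRESSED set (plus LZ4, ZSTD or LARGE_DNODE if
 * those features are active), but not DMU_BACKUP_FEATURE_EMBED_DATA,
 * since encrypted datasets have no embedded blocks.
 */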
static dmu_replay_record_t *
create_begin_record(struct dmu_send_params *dspp, objset_t *os,
uint64_t featureflags)
{
dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
KM_SLEEP);
drr->drr_type = DRR_BEGIN;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
dsl_dataset_t *to_ds = dspp->to_ds;
drrb->drr_magic = DMU_BACKUP_MAGIC;
drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
drrb->drr_type = dmu_objset_type(os);
drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
if (dspp->is_clone)
drrb->drr_flags |= DRR_FLAG_CLONE;
if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
drrb->drr_flags |= DRR_FLAG_CI_DATA;
if (zfs_send_set_freerecords_bit)
drrb->drr_flags |= DRR_FLAG_FREERECORDS;
drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK;
if (dspp->savedok) {
drrb->drr_toguid = dspp->saved_guid;
strlcpy(drrb->drr_toname, dspp->saved_toname,
sizeof (drrb->drr_toname));
} else {
dsl_dataset_name(to_ds, drrb->drr_toname);
if (!to_ds->ds_is_snapshot) {
(void) strlcat(drrb->drr_toname, "@--head--",
sizeof (drrb->drr_toname));
}
}
return (drr);
}
static void
setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
{
VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
to_arg->error_code = 0;
to_arg->cancel = B_FALSE;
to_arg->os = to_os;
to_arg->fromtxg = fromtxg;
to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
if (rawok)
to_arg->flags |= TRAVERSE_NO_DECRYPT;
if (zfs_send_corrupt_data)
to_arg->flags |= TRAVERSE_HARD;
to_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_from_thread(struct redact_list_thread_arg *from_arg,
redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
{
VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
from_arg->error_code = 0;
from_arg->cancel = B_FALSE;
from_arg->rl = from_rl;
from_arg->mark_redact = B_FALSE;
from_arg->num_blocks_visited = &dssp->dss_blocks;
/*
* If from_ds is null, send_traverse_thread just returns success and
* enqueues an eos marker.
*/
(void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
{
if (dspp->redactbook == NULL)
return;
rlt_arg->cancel = B_FALSE;
VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
rlt_arg->error_code = 0;
rlt_arg->mark_redact = B_TRUE;
rlt_arg->rl = rl;
rlt_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_merge_thread(struct send_merge_thread_arg *smt_arg,
struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
objset_t *os)
{
VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
smt_arg->cancel = B_FALSE;
smt_arg->error = 0;
smt_arg->from_arg = from_arg;
smt_arg->to_arg = to_arg;
if (dspp->redactbook != NULL)
smt_arg->redact_arg = rlt_arg;
smt_arg->os = os;
(void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
TS_RUN, minclsyspri);
}
static void
setup_reader_thread(struct send_reader_thread_arg *srt_arg,
struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
uint64_t featureflags)
{
VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
srt_arg->smta = smt_arg;
srt_arg->issue_reads = !dspp->dso->dso_dryrun;
srt_arg->featureflags = featureflags;
(void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static int
setup_resume_points(struct dmu_send_params *dspp,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
struct redact_list_thread_arg *rlt_arg,
struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
redaction_list_t *redact_rl, nvlist_t *nvl)
{
+ (void) smt_arg;
dsl_dataset_t *to_ds = dspp->to_ds;
int err = 0;
uint64_t obj = 0;
uint64_t blkid = 0;
if (resuming) {
obj = dspp->resumeobj;
dmu_object_info_t to_doi;
err = dmu_object_info(os, obj, &to_doi);
if (err != 0)
return (err);
blkid = dspp->resumeoff / to_doi.doi_data_block_size;
}
/*
* If we're resuming a redacted send, we can skip to the appropriate
* point in the redaction bookmark by binary searching through it.
*/
if (redact_rl != NULL) {
SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
}
SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
/*
* Note: If the resume point is in an object whose
* blocksize is different in the from vs to snapshots,
* we will have divided by the "wrong" blocksize.
* However, in this case fromsnap's send_cb() will
* detect that the blocksize has changed and therefore
* ignore this object.
*
* If we're resuming a send from a redaction bookmark,
* we still cannot accidentally suggest blocks behind
* the to_ds. In addition, we know that any blocks in
* the object in the to_ds will have to be sent, since
* the size changed. Therefore, we can't cause any harm
* this way either.
*/
SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
}
if (resuming) {
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
}
return (0);
}
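/*
 * For example (hypothetical values): resuming at resumeobj 193 and
 * resumeoff 1310720 in an object with 128K data blocks gives blkid 10,
 * so the to_ds traversal resumes from the bookmark <to_ds, 193, 0, 10>.
 */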
static dmu_sendstatus_t *
setup_send_progress(struct dmu_send_params *dspp)
{
dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
dssp->dss_outfd = dspp->outfd;
dssp->dss_off = dspp->off;
dssp->dss_proc = curproc;
mutex_enter(&dspp->to_ds->ds_sendstream_lock);
list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
mutex_exit(&dspp->to_ds->ds_sendstream_lock);
return (dssp);
}
/*
* Actually do the bulk of the work in a zfs send.
*
* The idea is that we want to do a send from ancestor_zb to to_ds. We also
* want to not send any data that has been modified by all the datasets in
* redactsnaparr, and store the list of blocks that are redacted in this way in
* a bookmark named redactbook, created on the to_ds. We do this by creating
* several worker threads, whose function is described below.
*
* There are three cases.
* The first case is a redacted zfs send. In this case there are 5 threads.
* The first thread is the to_ds traversal thread: it calls traverse_dataset on
* the to_ds and finds all the blocks that have changed since ancestor_zb (if
* it's a full send, that's all blocks in the dataset). It then sends those
* blocks on to the send merge thread. The redact list thread takes the data
* from the redaction bookmark and sends those blocks on to the send merge
* thread. The send merge thread takes the data from the to_ds traversal
* thread, and combines it with the redaction records from the redact list
* thread. If a block appears in both the to_ds's data and the redaction data,
* the send merge thread will mark it as redacted and send it on to the prefetch
* thread. Otherwise, the send merge thread will send the block on to the
* prefetch thread unchanged. The prefetch thread will issue prefetch reads for
* any data that isn't redacted, and then send the data on to the main thread.
* The main thread behaves the same as in a normal send case, issuing demand
* reads for data blocks and sending out records over the network.
*
* The graphic below diagrams the flow of data in the case of a redacted zfs
* send. Each box represents a thread, and each line represents the flow of
* data.
*
* Records from the |
* redaction bookmark |
* +--------------------+ | +---------------------------+
* | | v | Send Merge Thread |
* | Redact List Thread +----------> Apply redaction marks to |
* | | | records as specified by |
* +--------------------+ | redaction ranges |
* +----^---------------+------+
* | | Merged data
* | |
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since |
* ancestor_zb +------------v----+
* | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The second case is an incremental send from a redaction bookmark. The to_ds
* traversal thread and the main thread behave the same as in the redacted
* send case. The new thread is the from bookmark traversal thread. It
* iterates over the redaction list in the redaction bookmark, and enqueues
* records for each block that was redacted in the original send. The send
* merge thread now has to merge the data from the two threads. For details
* about that process, see the header comment of send_merge_thread(). Any data
* it decides to send on will be prefetched by the prefetch thread. Note that
* you can perform a redacted send from a redaction bookmark; in that case,
* the data flow behaves very similarly to the flow in the redacted send case,
* except with the addition of the bookmark traversal thread iterating over the
* redaction bookmark. The send_merge_thread also has to take on the
* responsibility of merging the redact list thread's records, the bookmark
* traversal thread's records, and the to_ds records.
*
* +---------------------+
* | |
* | Redact List Thread +--------------+
* | | |
* +---------------------+ |
* Blocks in redaction list | Ranges modified by every secure snap
* of from bookmark | (or EOS if not redacted)
* |
* +---------------------+ | +----v----------------------+
* | bookmark Traversal | v | Send Merge Thread |
* | Thread (finds +---------> Merges bookmark, rlt, and |
* | candidate blocks) | | to_ds send records |
* +---------------------+ +----^---------------+------+
* | | Merged data
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since +------------v----+
* ancestor_zb | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The final case is a simple zfs full or incremental send. The to_ds traversal
* thread behaves the same as always. The redact list thread is never started.
* The send merge thread takes all the blocks that the to_ds traversal thread
* sends it, prefetches the data, and sends the blocks on to the main thread.
* The main thread sends the data over the wire.
*
* To keep performance acceptable, we want to prefetch the data in the worker
* threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
* feature built into traverse_dataset, the combining and deletion of records
* due to redaction and sends from redaction bookmarks mean that we could
* issue many unnecessary prefetches. As a result, we only prefetch data
* after we've determined that the record is not going to be redacted. To
* prevent the prefetching from getting too far ahead of the main thread, the
* blocking queues that are used for communication are capped not by the
* number of entries in the queue, but by the sum of the size of the
* prefetches associated with them. The limit on the amount of data that the
* thread can prefetch beyond what the main thread has reached is controlled
* by the global variable zfs_send_queue_length. In addition, to prevent poor
* performance in the beginning of a send, we also limit the distance ahead
* that the traversal threads can be. That distance is controlled by the
* zfs_send_no_prefetch_queue_length tunable.
*
* Note: Releases dp using the specified tag.
*/
static int
dmu_send_impl(struct dmu_send_params *dspp)
{
objset_t *os;
dmu_replay_record_t *drr;
dmu_sendstatus_t *dssp;
dmu_send_cookie_t dsc = {0};
int err;
uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
uint64_t featureflags = 0;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *rlt_arg;
struct send_merge_thread_arg *smt_arg;
struct send_reader_thread_arg *srt_arg;
struct send_range *range;
redaction_list_t *from_rl = NULL;
redaction_list_t *redact_rl = NULL;
boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
boolean_t book_resuming = resuming;
dsl_dataset_t *to_ds = dspp->to_ds;
zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
dsl_pool_t *dp = dspp->dp;
void *tag = dspp->tag;
err = dmu_objset_from_ds(to_ds, &os);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If this is a non-raw send of an encrypted ds, we can ensure that
* the objset_phys_t is authenticated. This is safe because this is
* either a snapshot or we have owned the dataset, ensuring that
* it can't be modified.
*/
if (!dspp->rawok && os->os_encrypted &&
arc_is_unauthenticated(os->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform(os->os_phys_buf, os->os_spa,
&zb, B_FALSE);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
}
if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If we're doing a redacted send, hold the bookmark's redaction list.
*/
if (dspp->redactbook != NULL) {
err = dsl_redaction_list_hold_obj(dp,
dspp->redactbook->zbm_redaction_obj, FTAG,
&redact_rl);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
}
/*
* If we're sending from a redaction bookmark, hold the redaction list
* so that we can consider sending the redacted blocks.
*/
if (ancestor_zb->zbm_redaction_obj != 0) {
err = dsl_redaction_list_hold_obj(dp,
ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
if (err != 0) {
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, from_rl, FTAG);
}
dsl_dataset_long_hold(to_ds, FTAG);
from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
drr = create_begin_record(dspp, os, featureflags);
dssp = setup_send_progress(dspp);
dsc.dsc_drr = drr;
dsc.dsc_dso = dspp->dso;
dsc.dsc_os = os;
dsc.dsc_off = dspp->off;
dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
dsc.dsc_fromtxg = fromtxg;
dsc.dsc_pending_op = PENDING_NONE;
dsc.dsc_featureflags = featureflags;
dsc.dsc_resume_object = dspp->resumeobj;
dsc.dsc_resume_offset = dspp->resumeoff;
dsl_pool_rele(dp, tag);
void *payload = NULL;
size_t payload_len = 0;
nvlist_t *nvl = fnvlist_alloc();
/*
* If we're doing a redacted send, we include the snapshots we're
* redacted with respect to so that the target system knows what send
* streams can be correctly received on top of this dataset. If we're
* instead sending a redacted dataset, we include the snapshots that the
* dataset was created with respect to.
*/
if (dspp->redactbook != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
redact_rl->rl_phys->rlp_snaps,
redact_rl->rl_phys->rlp_num_snaps);
} else if (dsl_dataset_feature_is_active(to_ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t *tods_guids;
uint64_t length;
VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
length);
}
/*
* If we're sending from a redaction bookmark, then we should retrieve
* the guids of that bookmark so we can send them over the wire.
*/
if (from_rl != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
from_rl->rl_phys->rlp_snaps,
from_rl->rl_phys->rlp_num_snaps);
}
/*
* If the snapshot we're sending from is redacted, include the redaction
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
ASSERT3P(from_rl, ==, NULL);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
kmem_free(dspp->fromredactsnaps,
dspp->numfromredactsnaps * sizeof (uint64_t));
dspp->fromredactsnaps = NULL;
}
}
if (resuming || book_resuming) {
err = setup_resume_points(dspp, to_arg, from_arg,
rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
if (err != 0)
goto out;
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
uint64_t ivset_guid = (ancestor_zb != NULL) ?
ancestor_zb->zbm_ivset_guid : 0;
nvlist_t *keynvl = NULL;
ASSERT(os->os_encrypted);
err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
&keynvl);
if (err != 0) {
fnvlist_free(nvl);
goto out;
}
fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
fnvlist_free(keynvl);
}
if (!nvlist_empty(nvl)) {
payload = fnvlist_pack(nvl, &payload_len);
drr->drr_payloadlen = payload_len;
}
fnvlist_free(nvl);
err = dump_record(&dsc, payload, payload_len);
fnvlist_pack_free(payload, payload_len);
if (err != 0) {
err = dsc.dsc_err;
goto out;
}
setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
setup_from_thread(from_arg, from_rl, dssp);
setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
range = bqueue_dequeue(&srt_arg->q);
while (err == 0 && !range->eos_marker) {
err = do_dump(&dsc, range);
range = get_next_range(&srt_arg->q, range);
if (issig(JUSTLOOKING) && issig(FORREAL))
err = SET_ERROR(EINTR);
}
/*
* If we hit an error or are interrupted, cancel our worker threads and
* clear the queue of any pending records. The threads will pass the
* cancel up the tree of worker threads, and each one will clean up any
* pending records before exiting.
*/
if (err != 0) {
srt_arg->cancel = B_TRUE;
while (!range->eos_marker) {
range = get_next_range(&srt_arg->q, range);
}
}
range_free(range);
bqueue_destroy(&srt_arg->q);
bqueue_destroy(&smt_arg->q);
if (dspp->redactbook != NULL)
bqueue_destroy(&rlt_arg->q);
bqueue_destroy(&to_arg->q);
bqueue_destroy(&from_arg->q);
if (err == 0 && srt_arg->error != 0)
err = srt_arg->error;
if (err != 0)
goto out;
if (dsc.dsc_pending_op != PENDING_NONE)
if (dump_record(&dsc, NULL, 0) != 0)
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsc.dsc_err != 0)
err = dsc.dsc_err;
goto out;
}
/*
* Send the DRR_END record if this is not a saved stream.
* Otherwise, the omitted DRR_END record will signal to
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
bzero(drr, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
if (dump_record(&dsc, NULL, 0) != 0)
err = dsc.dsc_err;
}
out:
mutex_enter(&to_ds->ds_sendstream_lock);
list_remove(&to_ds->ds_sendstreams, dssp);
mutex_exit(&to_ds->ds_sendstream_lock);
VERIFY(err != 0 || (dsc.dsc_sent_begin &&
(dsc.dsc_sent_end || dspp->savedok)));
kmem_free(drr, sizeof (dmu_replay_record_t));
kmem_free(dssp, sizeof (dmu_sendstatus_t));
kmem_free(from_arg, sizeof (*from_arg));
kmem_free(to_arg, sizeof (*to_arg));
kmem_free(rlt_arg, sizeof (*rlt_arg));
kmem_free(smt_arg, sizeof (*smt_arg));
kmem_free(srt_arg, sizeof (*srt_arg));
dsl_dataset_long_rele(to_ds, FTAG);
if (from_rl != NULL) {
dsl_redaction_list_long_rele(from_rl, FTAG);
dsl_redaction_list_rele(from_rl, FTAG);
}
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err;
dsl_dataset_t *fromds;
ds_hold_flags_t dsflags;
struct dmu_send_params dspp = {0};
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.rawok = rawok;
dspp.savedok = savedok;
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
err = dsl_pool_hold(pool, FTAG, &dspp.dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (fromsnap != 0) {
err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
FTAG, &fromds);
if (err != 0) {
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
dspp.ancestor_zb.zbm_creation_txg =
dsl_dataset_phys(fromds)->ds_creation_txg;
dspp.ancestor_zb.zbm_creation_time =
dsl_dataset_phys(fromds)->ds_creation_time;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(dspp.dp->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&dspp.ancestor_zb.zbm_ivset_guid);
}
/* See dmu_send for the reasons behind this. */
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(fromds,
SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
bcopy(fromredact, dspp.fromredactsnaps, size);
}
boolean_t is_before =
dsl_dataset_is_before(dspp.to_ds, fromds, 0);
dspp.is_clone = (dspp.to_ds->ds_dir !=
fromds->ds_dir);
dsl_dataset_rele(fromds, FTAG);
if (!is_before) {
dsl_pool_rele(dspp.dp, FTAG);
err = SET_ERROR(EXDEV);
} else {
err = dmu_send_impl(&dspp);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
dsl_dataset_rele(dspp.to_ds, FTAG);
return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
const char *redactbook, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err = 0;
ds_hold_flags_t dsflags;
boolean_t owned = B_FALSE;
dsl_dataset_t *fromds = NULL;
zfs_bookmark_phys_t book = {0};
struct dmu_send_params dspp = {0};
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
dspp.tosnap = tosnap;
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.resumeobj = resumeobj;
dspp.resumeoff = resumeoff;
dspp.rawok = rawok;
dspp.savedok = savedok;
if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
if (err != 0)
return (err);
if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
/*
* We are sending a filesystem or volume. Ensure
* that it doesn't change by owning the dataset.
*/
if (savedok) {
/*
* We are looking for the dataset that represents the
* partially received send stream. If this stream was
* received as a new snapshot of an existing dataset,
* this will be saved in a hidden clone named
* "<pool>/<dataset>/%recv". Otherwise, the stream
* will be saved in the live dataset itself. In
* either case we need to use dsl_dataset_own_force()
* because the stream is marked as inconsistent,
* which would normally make it unavailable to be
* owned.
*/
char *name = kmem_asprintf("%s/%s", tosnap,
recv_clone_name);
err = dsl_dataset_own_force(dspp.dp, name, dsflags,
FTAG, &dspp.to_ds);
if (err == ENOENT) {
err = dsl_dataset_own_force(dspp.dp, tosnap,
dsflags, FTAG, &dspp.to_ds);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1,
&dspp.saved_guid);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TONAME, 1,
sizeof (dspp.saved_toname),
dspp.saved_toname);
}
if (err != 0)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
kmem_strfree(name);
} else {
err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
FTAG, &dspp.to_ds);
}
owned = B_TRUE;
} else {
err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (redactbook != NULL) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, tosnap, sizeof (path));
char *at = strchr(path, '@');
if (at == NULL) {
err = EINVAL;
} else {
(void) snprintf(at, sizeof (path) - (at - path), "#%s",
redactbook);
err = dsl_bookmark_lookup(dspp.dp, path,
NULL, &book);
dspp.redactbook = &book;
}
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
if (fromsnap != NULL) {
zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
int fsnamelen;
if (strpbrk(tosnap, "@#") != NULL)
fsnamelen = strpbrk(tosnap, "@#") - tosnap;
else
fsnamelen = strlen(tosnap);
/*
* If the fromsnap is in a different filesystem, then
* mark the send stream as a clone.
*/
if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
(fromsnap[fsnamelen] != '@' &&
fromsnap[fsnamelen] != '#')) {
dspp.is_clone = B_TRUE;
}
if (strchr(fromsnap, '@') != NULL) {
err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
&fromds);
if (err != 0) {
ASSERT3P(fromds, ==, NULL);
} else {
/*
* We need to make a deep copy of the redact
* snapshots of the from snapshot, because the
* array will be freed when we evict from_ds.
*/
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(
fromds, SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps =
NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size =
dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
bcopy(fromredact, dspp.fromredactsnaps,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,
0)) {
err = SET_ERROR(EXDEV);
} else {
zb->zbm_creation_txg =
dsl_dataset_phys(fromds)->
ds_creation_txg;
zb->zbm_creation_time =
dsl_dataset_phys(fromds)->
ds_creation_time;
zb->zbm_guid =
dsl_dataset_phys(fromds)->ds_guid;
zb->zbm_redaction_obj = 0;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(
dspp.dp->dp_meta_objset,
fromds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1,
&zb->zbm_ivset_guid);
}
}
dsl_dataset_rele(fromds, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
zb);
if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
zb->zbm_guid ==
dsl_dataset_phys(dspp.to_ds)->ds_guid)
err = 0;
}
if (err == 0) {
/* dmu_send_impl will call dsl_pool_rele for us. */
err = dmu_send_impl(&dspp);
} else {
dsl_pool_rele(dspp.dp, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
int err = 0;
uint64_t size;
/*
* Assume that space (both on-disk and in-stream) is dominated by
* data. We will adjust for indirect blocks and the copies property,
* but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
*/
uint64_t recordsize;
uint64_t record_count;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
/* Assume all (uncompressed) blocks are recordsize. */
if (zfs_override_estimate_recordsize != 0) {
recordsize = zfs_override_estimate_recordsize;
} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
} else {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
}
if (err != 0)
return (err);
record_count = uncompressed / recordsize;
/*
* If we're estimating a send size for a compressed stream, use the
* compressed data size to estimate the stream size. Otherwise, use the
* uncompressed data size.
*/
size = stream_compressed ? compressed : uncompressed;
/*
* Subtract out approximate space used by indirect blocks.
* Assume most space is used by data blocks (non-indirect, non-dnode).
* Assume no ditto blocks or internal fragmentation.
*
* Therefore, space used by indirect blocks is sizeof(blkptr_t) per
* block.
*/
size -= record_count * sizeof (blkptr_t);
/* Add in the space for the record associated with each block. */
size += record_count * sizeof (dmu_replay_record_t);
*sizep = size;
return (0);
}
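/*
 * Worked example (hypothetical numbers): 1 GiB of uncompressed data at a
 * 128K recordsize gives a record_count of 8192, so the estimate
 * subtracts 8192 * sizeof (blkptr_t) for indirect blocks and adds
 * 8192 * sizeof (dmu_replay_record_t) for the per-block stream records,
 * an adjustment of a few MiB on top of the chosen data size.
 */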
int
dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
boolean_t saved, uint64_t *sizep)
{
int err;
dsl_dataset_t *ds = origds;
uint64_t uncomp, comp;
ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
ASSERT(fromds == NULL || frombook == NULL);
/*
* If this is a saved send we may actually be sending
* from the %recv clone used for resuming.
*/
if (saved) {
objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
uint64_t guid;
char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_name(origds, dsname);
(void) strcat(dsname, "/");
(void) strcat(dsname, recv_clone_name);
err = dsl_dataset_hold(origds->ds_dir->dd_pool,
dsname, FTAG, &ds);
if (err != ENOENT && err != 0) {
return (err);
} else if (err == ENOENT) {
ds = origds;
}
/* check that this dataset has partially received data */
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
}
/* tosnap must be a snapshot or the target of a saved send */
if (!ds->ds_is_snapshot && ds == origds)
return (SET_ERROR(EINVAL));
if (fromds != NULL) {
uint64_t used;
if (!fromds->ds_is_snapshot) {
err = SET_ERROR(EINVAL);
goto out;
}
if (!dsl_dataset_is_before(ds, fromds, 0)) {
err = SET_ERROR(EXDEV);
goto out;
}
err = dsl_dataset_space_written(fromds, ds, &used, &comp,
&uncomp);
if (err != 0)
goto out;
} else if (frombook != NULL) {
uint64_t used;
err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
&comp, &uncomp);
if (err != 0)
goto out;
} else {
uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
}
err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
stream_compressed, sizep);
/*
* Add the size of the BEGIN and END records to the estimate.
*/
*sizep += 2 * sizeof (dmu_replay_record_t);
out:
if (ds != origds)
dsl_dataset_rele(ds, FTAG);
return (err);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
"Allow sending corrupt data");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW,
"Maximum send queue length");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
"Send unmodified spill blocks");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW,
"Maximum send queue length for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW,
"Send queue fill fraction");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW,
"Send queue fill fraction for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW,
"Override block size estimate with fixed size");
/* END CSTYLED */
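/*
 * These tunables are exposed as module parameters (for example
 * zfs_send_queue_length and zfs_send_no_prefetch_queue_length on Linux,
 * with corresponding vfs.zfs.send sysctls on FreeBSD); raising them
 * trades memory for deeper read-ahead in the send pipeline described
 * above.
 */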
diff --git a/sys/contrib/openzfs/module/zfs/dmu_traverse.c b/sys/contrib/openzfs/module/zfs/dmu_traverse.c
index 862c0bf404ad..2f1c2978b3be 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_traverse.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_traverse.c
@@ -1,827 +1,827 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>
int32_t zfs_pd_bytes_max = 50 * 1024 * 1024; /* 50MB */
int32_t send_holes_without_birth_time = 1;
int32_t zfs_traverse_indirect_prefetch_limit = 32;
typedef struct prefetch_data {
kmutex_t pd_mtx;
kcondvar_t pd_cv;
int32_t pd_bytes_fetched;
int pd_flags;
boolean_t pd_cancel;
boolean_t pd_exited;
zbookmark_phys_t pd_resume;
} prefetch_data_t;
typedef struct traverse_data {
spa_t *td_spa;
uint64_t td_objset;
blkptr_t *td_rootbp;
uint64_t td_min_txg;
zbookmark_phys_t *td_resume;
int td_flags;
prefetch_data_t *td_pfd;
boolean_t td_paused;
uint64_t td_hole_birth_enabled_txg;
blkptr_cb_t *td_func;
void *td_arg;
boolean_t td_realloc_possible;
} traverse_data_t;
static int traverse_dnode(traverse_data_t *td, const blkptr_t *bp,
const dnode_phys_t *dnp, uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
uint64_t objset, uint64_t object);
static int
traverse_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
traverse_data_t *td = arg;
zbookmark_phys_t zb;
if (BP_IS_HOLE(bp))
return (0);
if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(td->td_spa))
return (-1);
SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);
return (0);
}
static int
traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
traverse_data_t *td = arg;
if (lrc->lrc_txtype == TX_WRITE) {
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
if (BP_IS_HOLE(bp))
return (0);
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
td->td_arg);
}
return (0);
}
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
/*
* We only want to visit blocks that have been claimed but not yet
* replayed; plus blocks that are already stable in read-only mode.
*/
if (claim_txg == 0 && spa_writeable(td->td_spa))
return;
zilog_t *zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);
(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
claim_txg, !(td->td_flags & TRAVERSE_NO_DECRYPT));
zil_free(zilog);
}
typedef enum resume_skip {
RESUME_SKIP_ALL,
RESUME_SKIP_NONE,
RESUME_SKIP_CHILDREN
} resume_skip_t;
/*
* Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
* the block indicated by zb does not need to be visited at all. Returns
* RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the
* resume point. This indicates that this block should be visited but not its
* children (since they must have been visited in a previous traversal).
* Otherwise returns RESUME_SKIP_NONE.
*/
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
/*
* If we already visited this bp & everything below,
* don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb, td->td_resume))
return (RESUME_SKIP_ALL);
/*
* If we found the block we're trying to resume from, zero
* the bookmark out to indicate that we have resumed.
*/
if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
bzero(td->td_resume, sizeof (*zb));
if (td->td_flags & TRAVERSE_POST)
return (RESUME_SKIP_CHILDREN);
}
}
return (RESUME_SKIP_NONE);
}
/*
* Returns B_TRUE if a prefetch read is issued, otherwise B_FALSE.
*/
static boolean_t
traverse_prefetch_metadata(traverse_data_t *td,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
return (B_FALSE);
/*
* If we are in the process of resuming, don't prefetch, because
* some children will not be needed (and in fact may have already
* been freed).
*/
if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
return (B_FALSE);
if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
return (B_FALSE);
if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
return (B_FALSE);
ASSERT(!BP_IS_REDACTED(bp));
if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
zio_flags |= ZIO_FLAG_RAW;
(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
return (B_TRUE);
}
static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG || BP_IS_REDACTED(bp))
return (B_FALSE);
return (B_TRUE);
}
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
arc_buf_t *buf = NULL;
prefetch_data_t *pd = td->td_pfd;
switch (resume_skip_check(td, dnp, zb)) {
case RESUME_SKIP_ALL:
return (0);
case RESUME_SKIP_CHILDREN:
goto post;
case RESUME_SKIP_NONE:
break;
default:
ASSERT(0);
}
if (bp->blk_birth == 0) {
/*
* Since this block has a birth time of 0 it must be one of
* two things: a hole created before the
* SPA_FEATURE_HOLE_BIRTH feature was enabled, or a hole
* which has always been a hole in an object.
*
* If a file is written sparsely, then the unwritten parts of
* the file were "always holes" -- that is, they have been
* holes since this object was allocated. However, we (and
* our callers) can not necessarily tell when an object was
* allocated. Therefore, if it's possible that this object
* was freed and then its object number reused, we need to
* visit all the holes with birth==0.
*
* If it isn't possible that the object number was reused,
* then if SPA_FEATURE_HOLE_BIRTH was enabled before we wrote
* all the blocks we will visit as part of this traversal,
* then this hole must have always existed, so we can skip
* it. We visit blocks born after (exclusive) td_min_txg.
*
* Note that the meta-dnode cannot be reallocated.
*/
if (!send_holes_without_birth_time &&
(!td->td_realloc_possible ||
zb->zb_object == DMU_META_DNODE_OBJECT) &&
td->td_hole_birth_enabled_txg <= td->td_min_txg)
return (0);
} else if (bp->blk_birth <= td->td_min_txg) {
return (0);
}
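/*
 * If a separate prefetch thread is running, pace this traversal
 * against it: wait until the prefetcher has issued enough read-ahead
 * to cover this block (it adds each issued block's size to
 * pd_bytes_fetched), then subtract this block's size to reopen the
 * prefetch window.
 */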
if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
uint64_t size = BP_GET_LSIZE(bp);
mutex_enter(&pd->pd_mtx);
ASSERT(pd->pd_bytes_fetched >= 0);
while (pd->pd_bytes_fetched < size && !pd->pd_exited)
cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
pd->pd_bytes_fetched -= size;
cv_broadcast(&pd->pd_cv);
mutex_exit(&pd->pd_mtx);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
if (err != 0)
goto post;
return (0);
}
if (td->td_flags & TRAVERSE_PRE) {
err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
td->td_arg);
if (err == TRAVERSE_VISIT_NO_CHILDREN)
return (0);
if (err != 0)
goto post;
}
if (BP_GET_LEVEL(bp) > 0) {
uint32_t flags = ARC_FLAG_WAIT;
int32_t i, ptidx, pidx;
uint32_t prefetchlimit;
int32_t epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t *czb;
ASSERT(!BP_IS_PROTECTED(bp));
err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err != 0)
goto post;
czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);
/*
* When performing a traversal it is beneficial to
* asynchronously read ahead the upcoming indirect
* blocks since they will be needed shortly. However,
* since a 128k indirect (non-L0) block may contain up
* to 1024 128-byte block pointers, it's preferable to not
* prefetch them all at once. Issuing a large number of
* async reads may affect performance, and the earlier
* the indirect blocks are prefetched the less likely
* they are to still be resident in the ARC when needed.
* Therefore, prefetching indirect blocks is limited to
* zfs_traverse_indirect_prefetch_limit=32 blocks by
* default.
*
* pidx: Index of the next block pointer to be prefetched.
* ptidx: Index at which the next batch of prefetches is triggered.
*/
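/*
 * For example, with the default limit of 32 (and assuming every
 * prefetch read is actually issued): at i == 0, block pointers
 * 1..32 are prefetched and ptidx is set to the index at which the
 * 16th prefetch was issued; when the visit loop reaches that index,
 * the next batch of up to 32 is issued, keeping roughly 16-48
 * child blocks of read-ahead in flight.
 */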
ptidx = 0;
pidx = 1;
prefetchlimit = zfs_traverse_indirect_prefetch_limit;
for (i = 0; i < epb; i++) {
if (prefetchlimit && i == ptidx) {
ASSERT3S(ptidx, <=, pidx);
for (uint32_t prefetched = 0; pidx < epb &&
prefetched < prefetchlimit; pidx++) {
SET_BOOKMARK(czb, zb->zb_objset,
zb->zb_object, zb->zb_level - 1,
zb->zb_blkid * epb + pidx);
if (traverse_prefetch_metadata(td,
&((blkptr_t *)buf->b_data)[pidx],
czb) == B_TRUE) {
prefetched++;
if (prefetched ==
MAX(prefetchlimit / 2, 1))
ptidx = pidx;
}
}
}
/* recursively visitbp() blocks below this */
SET_BOOKMARK(czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = traverse_visitbp(td, dnp,
&((blkptr_t *)buf->b_data)[i], czb);
if (err != 0)
break;
}
kmem_free(czb, sizeof (zbookmark_phys_t));
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
uint32_t flags = ARC_FLAG_WAIT;
uint32_t zio_flags = ZIO_FLAG_CANFAIL;
int32_t i;
int32_t epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
dnode_phys_t *child_dnp;
/*
* dnode blocks might have their bonus buffers encrypted, so
* we must be careful to honor TRAVERSE_NO_DECRYPT
*/
if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
zio_flags |= ZIO_FLAG_RAW;
err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
if (err != 0)
goto post;
child_dnp = buf->b_data;
for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
prefetch_dnode_metadata(td, &child_dnp[i],
zb->zb_objset, zb->zb_blkid * epb + i);
}
/* recursively visitbp() blocks below this */
for (i = 0; i < epb; i += child_dnp[i].dn_extra_slots + 1) {
err = traverse_dnode(td, bp, &child_dnp[i],
zb->zb_objset, zb->zb_blkid * epb + i);
if (err != 0)
break;
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
uint32_t zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
if ((td->td_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
zio_flags |= ZIO_FLAG_RAW;
err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
if (err != 0)
goto post;
osp = buf->b_data;
prefetch_dnode_metadata(td, &osp->os_meta_dnode, zb->zb_objset,
DMU_META_DNODE_OBJECT);
/*
* See the block comment above for the goal of this variable.
* If the maxblkid of the meta-dnode is 0, then we know that
* we've never had more than DNODES_PER_BLOCK objects in the
* dataset, which means we can't have reused any object ids.
*/
if (osp->os_meta_dnode.dn_maxblkid == 0)
td->td_realloc_possible = B_FALSE;
if (OBJSET_BUF_HAS_USERUSED(buf)) {
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
prefetch_dnode_metadata(td,
&osp->os_projectused_dnode,
zb->zb_objset, DMU_PROJECTUSED_OBJECT);
prefetch_dnode_metadata(td, &osp->os_groupused_dnode,
zb->zb_objset, DMU_GROUPUSED_OBJECT);
prefetch_dnode_metadata(td, &osp->os_userused_dnode,
zb->zb_objset, DMU_USERUSED_OBJECT);
}
err = traverse_dnode(td, bp, &osp->os_meta_dnode, zb->zb_objset,
DMU_META_DNODE_OBJECT);
if (err == 0 && OBJSET_BUF_HAS_USERUSED(buf)) {
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
err = traverse_dnode(td, bp,
&osp->os_projectused_dnode, zb->zb_objset,
DMU_PROJECTUSED_OBJECT);
if (err == 0)
err = traverse_dnode(td, bp,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
if (err == 0)
err = traverse_dnode(td, bp,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
if (buf)
arc_buf_destroy(buf, &buf);
post:
if (err == 0 && (td->td_flags & TRAVERSE_POST))
err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
if ((td->td_flags & TRAVERSE_HARD) && (err == EIO || err == ECKSUM)) {
/*
* Ignore this disk error as requested by the HARD flag,
* and continue traversal.
*/
err = 0;
}
/*
* If we are stopping here, set td_resume.
*/
if (td->td_resume != NULL && err != 0 && !td->td_paused) {
td->td_resume->zb_objset = zb->zb_objset;
td->td_resume->zb_object = zb->zb_object;
td->td_resume->zb_level = 0;
/*
* If we have stopped on an indirect block (e.g. due to
* i/o error), we have not visited anything below it.
* Set the bookmark to the first level-0 block that we need
* to visit. This way, the resuming code does not need to
* deal with resuming from indirect blocks.
*
* Note, if zb_level <= 0, dnp may be NULL, so we don't want
* to dereference it.
*/
td->td_resume->zb_blkid = zb->zb_blkid;
if (zb->zb_level > 0) {
td->td_resume->zb_blkid <<= zb->zb_level *
(dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
}
td->td_paused = B_TRUE;
}
return (err);
}
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int j;
zbookmark_phys_t czb;
for (j = 0; j < dnp->dn_nblkptr; j++) {
SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
traverse_prefetch_metadata(td, DN_SPILL_BLKPTR(dnp), &czb);
}
}
static int
traverse_dnode(traverse_data_t *td, const blkptr_t *bp, const dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int j, err = 0;
zbookmark_phys_t czb;
if (object != DMU_META_DNODE_OBJECT && td->td_resume != NULL &&
object < td->td_resume->zb_object)
return (0);
if (td->td_flags & TRAVERSE_PRE) {
SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
ZB_DNODE_BLKID);
err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
td->td_arg);
if (err == TRAVERSE_VISIT_NO_CHILDREN)
return (0);
if (err != 0)
return (err);
}
for (j = 0; j < dnp->dn_nblkptr; j++) {
SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
if (err != 0)
break;
}
if (err == 0 && (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
err = traverse_visitbp(td, dnp, DN_SPILL_BLKPTR(dnp), &czb);
}
if (err == 0 && (td->td_flags & TRAVERSE_POST)) {
SET_BOOKMARK(&czb, objset, object, ZB_DNODE_LEVEL,
ZB_DNODE_BLKID);
err = td->td_func(td->td_spa, NULL, bp, &czb, dnp,
td->td_arg);
if (err == TRAVERSE_VISIT_NO_CHILDREN)
return (0);
if (err != 0)
return (err);
}
return (err);
}
-/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog, (void) dnp;
prefetch_data_t *pfd = arg;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH;
ASSERT(pfd->pd_bytes_fetched >= 0);
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (pfd->pd_cancel)
return (SET_ERROR(EINTR));
if (!prefetch_needed(pfd, bp))
return (0);
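/*
 * Flow control: let the prefetcher run at most zfs_pd_bytes_max bytes
 * ahead of the main traversal thread, which subtracts each consumed
 * block's size from pd_bytes_fetched as it catches up.
 */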
mutex_enter(&pfd->pd_mtx);
while (!pfd->pd_cancel && pfd->pd_bytes_fetched >= zfs_pd_bytes_max)
cv_wait_sig(&pfd->pd_cv, &pfd->pd_mtx);
pfd->pd_bytes_fetched += BP_GET_LSIZE(bp);
cv_broadcast(&pfd->pd_cv);
mutex_exit(&pfd->pd_mtx);
if ((pfd->pd_flags & TRAVERSE_NO_DECRYPT) && BP_IS_PROTECTED(bp))
zio_flags |= ZIO_FLAG_RAW;
(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
zio_flags, &aflags, zb);
return (0);
}
static void
traverse_prefetch_thread(void *arg)
{
traverse_data_t *td_main = arg;
traverse_data_t td = *td_main;
zbookmark_phys_t czb;
fstrans_cookie_t cookie = spl_fstrans_mark();
td.td_func = traverse_prefetcher;
td.td_arg = td_main->td_pfd;
td.td_pfd = NULL;
td.td_resume = &td_main->td_pfd->pd_resume;
SET_BOOKMARK(&czb, td.td_objset,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);
mutex_enter(&td_main->td_pfd->pd_mtx);
td_main->td_pfd->pd_exited = B_TRUE;
cv_broadcast(&td_main->td_pfd->pd_cv);
mutex_exit(&td_main->td_pfd->pd_mtx);
spl_fstrans_unmark(cookie);
}
/*
* NB: dataset must not be changing on-disk (e.g., it is a snapshot or we are
* in syncing context).
*/
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
uint64_t txg_start, zbookmark_phys_t *resume, int flags,
blkptr_cb_t func, void *arg)
{
traverse_data_t *td;
prefetch_data_t *pd;
zbookmark_phys_t *czb;
int err;
ASSERT(ds == NULL || objset == ds->ds_object);
ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));
td = kmem_alloc(sizeof (traverse_data_t), KM_SLEEP);
pd = kmem_zalloc(sizeof (prefetch_data_t), KM_SLEEP);
czb = kmem_alloc(sizeof (zbookmark_phys_t), KM_SLEEP);
td->td_spa = spa;
td->td_objset = objset;
td->td_rootbp = rootbp;
td->td_min_txg = txg_start;
td->td_resume = resume;
td->td_func = func;
td->td_arg = arg;
td->td_pfd = pd;
td->td_flags = flags;
td->td_paused = B_FALSE;
td->td_realloc_possible = (txg_start == 0 ? B_FALSE : B_TRUE);
if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
VERIFY(spa_feature_enabled_txg(spa,
SPA_FEATURE_HOLE_BIRTH, &td->td_hole_birth_enabled_txg));
} else {
td->td_hole_birth_enabled_txg = UINT64_MAX;
}
pd->pd_flags = flags;
if (resume != NULL)
pd->pd_resume = *resume;
mutex_init(&pd->pd_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&pd->pd_cv, NULL, CV_DEFAULT, NULL);
SET_BOOKMARK(czb, td->td_objset,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
/* See comment on ZIL traversal in dsl_scan_visitds. */
if (ds != NULL && !ds->ds_is_snapshot && !BP_IS_HOLE(rootbp)) {
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
uint32_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
ASSERT(!BP_IS_REDACTED(rootbp));
if ((td->td_flags & TRAVERSE_NO_DECRYPT) &&
BP_IS_PROTECTED(rootbp))
zio_flags |= ZIO_FLAG_RAW;
err = arc_read(NULL, td->td_spa, rootbp, arc_getbuf_func,
&buf, ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, czb);
if (err != 0) {
/*
* If both TRAVERSE_HARD and TRAVERSE_PRE are set,
* continue to visitbp so that td_func can be called
* in the pre stage, and err will be reset to zero.
*/
if (!(td->td_flags & TRAVERSE_HARD) ||
!(td->td_flags & TRAVERSE_PRE))
goto out;
} else {
osp = buf->b_data;
traverse_zil(td, &osp->os_zil_header);
arc_buf_destroy(buf, &buf);
}
}
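/*
 * Start the data-prefetch thread only if it was requested; if prefetch
 * was not requested or the dispatch fails, mark the prefetch state as
 * already exited so neither traverse_visitbp() nor the shutdown path
 * below waits on it.
 */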
if (!(flags & TRAVERSE_PREFETCH_DATA) ||
taskq_dispatch(spa->spa_prefetch_taskq, traverse_prefetch_thread,
td, TQ_NOQUEUE) == TASKQID_INVALID)
pd->pd_exited = B_TRUE;
err = traverse_visitbp(td, NULL, rootbp, czb);
mutex_enter(&pd->pd_mtx);
pd->pd_cancel = B_TRUE;
cv_broadcast(&pd->pd_cv);
while (!pd->pd_exited)
cv_wait_sig(&pd->pd_cv, &pd->pd_mtx);
mutex_exit(&pd->pd_mtx);
out:
mutex_destroy(&pd->pd_mtx);
cv_destroy(&pd->pd_cv);
kmem_free(czb, sizeof (zbookmark_phys_t));
kmem_free(pd, sizeof (struct prefetch_data));
kmem_free(td, sizeof (struct traverse_data));
return (err);
}
/*
* NB: dataset must not be changing on-disk (e.g., it is a snapshot or we are
* in syncing context).
*/
int
traverse_dataset_resume(dsl_dataset_t *ds, uint64_t txg_start,
zbookmark_phys_t *resume,
int flags, blkptr_cb_t func, void *arg)
{
return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
&dsl_dataset_phys(ds)->ds_bp, txg_start, resume, flags, func, arg));
}
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start,
int flags, blkptr_cb_t func, void *arg)
{
return (traverse_dataset_resume(ds, txg_start, NULL, flags, func, arg));
}
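/*
 * Usage sketch (hypothetical caller; "count_l0_cb" and "count" are
 * illustrative names only): a blkptr_cb_t that tallies allocated
 * level-0 blocks could be passed to traverse_dataset() roughly as:
 *
 *	static int
 *	count_l0_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *
 *		if (zb->zb_level == 0 && !BP_IS_HOLE(bp))
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) traverse_dataset(ds, 0,
 *	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, count_l0_cb, &count);
 */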
int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
uint64_t txg_start, zbookmark_phys_t *resume, int flags,
blkptr_cb_t func, void *arg)
{
return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
blkptr, txg_start, resume, flags, func, arg));
}
/*
* NB: pool must not be changing on-disk (e.g., from zdb or sync context).
*/
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
blkptr_cb_t func, void *arg)
{
int err;
dsl_pool_t *dp = spa_get_dsl(spa);
objset_t *mos = dp->dp_meta_objset;
boolean_t hard = (flags & TRAVERSE_HARD);
/* visit the MOS */
err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
txg_start, NULL, flags, func, arg);
if (err != 0)
return (err);
/* visit each dataset */
for (uint64_t obj = 1; err == 0;
err = dmu_object_next(mos, &obj, B_FALSE, txg_start)) {
dmu_object_info_t doi;
err = dmu_object_info(mos, obj, &doi);
if (err != 0) {
if (hard)
continue;
break;
}
if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
dsl_dataset_t *ds;
uint64_t txg = txg_start;
dsl_pool_config_enter(dp, FTAG);
err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
dsl_pool_config_exit(dp, FTAG);
if (err != 0) {
if (hard)
continue;
break;
}
if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
err = traverse_dataset(ds, txg, flags, func, arg);
dsl_dataset_rele(ds, FTAG);
if (err != 0)
break;
}
}
if (err == ESRCH)
err = 0;
return (err);
}
EXPORT_SYMBOL(traverse_dataset);
EXPORT_SYMBOL(traverse_pool);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW,
"Max number of bytes to prefetch");
ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, INT, ZMOD_RW,
"Traverse prefetch number of blocks pointed by indirect block");
#if defined(_KERNEL)
module_param_named(ignore_hole_birth, send_holes_without_birth_time, int, 0644);
MODULE_PARM_DESC(ignore_hole_birth,
"Alias for send_holes_without_birth_time");
#endif
ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
"Ignore hole_birth txg for zfs send");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
index a26b0d739921..7566663bd3ad 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_zfetch.c
@@ -1,550 +1,551 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
#include <sys/wmsum.h>
/*
* This tunable disables predictive prefetch. Note that it leaves "prescient"
* prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
* prescient prefetch never issues i/os that end up not being needed,
* so it can't hurt performance.
*/
int zfs_prefetch_disable = B_FALSE;
/* max # of streams per zfetch */
unsigned int zfetch_max_streams = 8;
/* min time before stream reclaim */
unsigned int zfetch_min_sec_reap = 2;
/* max bytes to prefetch per stream (default 8MB) */
unsigned int zfetch_max_distance = 8 * 1024 * 1024;
/* max bytes to prefetch indirects for per stream (default 64MB) */
unsigned int zfetch_max_idistance = 64 * 1024 * 1024;
/* max number of bytes in an array_read in which we allow prefetching (1MB) */
unsigned long zfetch_array_rd_sz = 1024 * 1024;
typedef struct zfetch_stats {
kstat_named_t zfetchstat_hits;
kstat_named_t zfetchstat_misses;
kstat_named_t zfetchstat_max_streams;
kstat_named_t zfetchstat_io_issued;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "max_streams", KSTAT_DATA_UINT64 },
{ "io_issued", KSTAT_DATA_UINT64 },
};
struct {
wmsum_t zfetchstat_hits;
wmsum_t zfetchstat_misses;
wmsum_t zfetchstat_max_streams;
wmsum_t zfetchstat_io_issued;
} zfetch_sums;
#define ZFETCHSTAT_BUMP(stat) \
wmsum_add(&zfetch_sums.stat, 1)
#define ZFETCHSTAT_ADD(stat, val) \
wmsum_add(&zfetch_sums.stat, val)
kstat_t *zfetch_ksp;
static int
zfetch_kstats_update(kstat_t *ksp, int rw)
{
zfetch_stats_t *zs = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (EACCES);
zs->zfetchstat_hits.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_hits);
zs->zfetchstat_misses.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_misses);
zs->zfetchstat_max_streams.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_max_streams);
zs->zfetchstat_io_issued.value.ui64 =
wmsum_value(&zfetch_sums.zfetchstat_io_issued);
return (0);
}
void
zfetch_init(void)
{
wmsum_init(&zfetch_sums.zfetchstat_hits, 0);
wmsum_init(&zfetch_sums.zfetchstat_misses, 0);
wmsum_init(&zfetch_sums.zfetchstat_max_streams, 0);
wmsum_init(&zfetch_sums.zfetchstat_io_issued, 0);
zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zfetch_ksp != NULL) {
zfetch_ksp->ks_data = &zfetch_stats;
zfetch_ksp->ks_update = zfetch_kstats_update;
kstat_install(zfetch_ksp);
}
}
void
zfetch_fini(void)
{
if (zfetch_ksp != NULL) {
kstat_delete(zfetch_ksp);
zfetch_ksp = NULL;
}
wmsum_fini(&zfetch_sums.zfetchstat_hits);
wmsum_fini(&zfetch_sums.zfetchstat_misses);
wmsum_fini(&zfetch_sums.zfetchstat_max_streams);
wmsum_fini(&zfetch_sums.zfetchstat_io_issued);
}
/*
* This takes a pointer to a zfetch structure and a dnode. It performs the
* necessary setup for the zfetch structure, grokking data from the
* associated dnode.
*/
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
if (zf == NULL)
return;
zf->zf_dnode = dno;
zf->zf_numstreams = 0;
list_create(&zf->zf_stream, sizeof (zstream_t),
offsetof(zstream_t, zs_node));
mutex_init(&zf->zf_lock, NULL, MUTEX_DEFAULT, NULL);
}
static void
dmu_zfetch_stream_fini(zstream_t *zs)
{
ASSERT(!list_link_active(&zs->zs_node));
zfs_refcount_destroy(&zs->zs_callers);
zfs_refcount_destroy(&zs->zs_refs);
kmem_free(zs, sizeof (*zs));
}
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
ASSERT(MUTEX_HELD(&zf->zf_lock));
list_remove(&zf->zf_stream, zs);
zf->zf_numstreams--;
membar_producer();
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
}
/*
* Clean up state associated with a zfetch structure (e.g. destroy the
* streams). This doesn't free the zfetch_t itself; that's left to the caller.
*/
void
dmu_zfetch_fini(zfetch_t *zf)
{
zstream_t *zs;
mutex_enter(&zf->zf_lock);
while ((zs = list_head(&zf->zf_stream)) != NULL)
dmu_zfetch_stream_remove(zf, zs);
mutex_exit(&zf->zf_lock);
list_destroy(&zf->zf_stream);
mutex_destroy(&zf->zf_lock);
zf->zf_dnode = NULL;
}
/*
* If there aren't too many streams already, create a new stream.
* The "blkid" argument is the next block that we expect this stream to access.
* While we're here, clean up old streams (which haven't been
* accessed for at least zfetch_min_sec_reap seconds).
*/
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
zstream_t *zs_next;
hrtime_t now = gethrtime();
ASSERT(MUTEX_HELD(&zf->zf_lock));
/*
* Clean up old streams.
*/
for (zstream_t *zs = list_head(&zf->zf_stream);
zs != NULL; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs);
/*
* Skip if still active. 1 -- zf_stream reference.
*/
if (zfs_refcount_count(&zs->zs_refs) != 1)
continue;
if (((now - zs->zs_atime) / NANOSEC) >
zfetch_min_sec_reap)
dmu_zfetch_stream_remove(zf, zs);
}
/*
* The maximum number of streams is normally zfetch_max_streams,
* but for small files we lower it such that it's at least possible
* for all the streams to be non-overlapping.
*
* If we are already at the maximum number of streams for this file,
* even after removing old streams, then don't create this stream.
*/
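/*
 * For instance, a file whose size (dn_maxblkid * dn_datablksz) is
 * smaller than zfetch_max_distance is limited to a single stream,
 * since two streams could not both prefetch that far without
 * overlapping.
 */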
uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
zfetch_max_distance));
if (zf->zf_numstreams >= max_streams) {
ZFETCHSTAT_BUMP(zfetchstat_max_streams);
return;
}
zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
zs->zs_blkid = blkid;
zs->zs_pf_blkid1 = blkid;
zs->zs_pf_blkid = blkid;
zs->zs_ipf_blkid1 = blkid;
zs->zs_ipf_blkid = blkid;
zs->zs_atime = now;
zs->zs_fetch = zf;
zs->zs_missed = B_FALSE;
zfs_refcount_create(&zs->zs_callers);
zfs_refcount_create(&zs->zs_refs);
/* One reference for zf_stream. */
zfs_refcount_add(&zs->zs_refs, NULL);
zf->zf_numstreams++;
list_insert_head(&zf->zf_stream, zs);
}
static void
dmu_zfetch_stream_done(void *arg, boolean_t io_issued)
{
+ (void) io_issued;
zstream_t *zs = arg;
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
}
/*
* This is the predictive prefetch entry point. dmu_zfetch_prepare()
* associates the dnode access specified by the blkid and nblks arguments
* with a prefetch stream, predicts further accesses based on the stream's
* state, and returns the stream pointer on success. That pointer must
* later be passed to dmu_zfetch_run() to initiate the speculative prefetch
* for the stream and release it. dmu_zfetch() is a wrapper for simple cases
* when a window between prediction and prefetch initiation is not needed.
* The fetch_data argument specifies whether actual data blocks should be
* fetched:
* FALSE -- prefetch only indirect blocks for predicted data blocks;
* TRUE -- prefetch predicted data blocks plus following indirect blocks.
*/
zstream_t *
dmu_zfetch_prepare(zfetch_t *zf, uint64_t blkid, uint64_t nblks,
boolean_t fetch_data, boolean_t have_lock)
{
zstream_t *zs;
int64_t pf_start, ipf_start;
int64_t pf_ahead_blks, max_blks;
int max_dist_blks, pf_nblks, ipf_nblks;
uint64_t end_of_access_blkid, maxblkid;
end_of_access_blkid = blkid + nblks;
spa_t *spa = zf->zf_dnode->dn_objset->os_spa;
if (zfs_prefetch_disable)
return (NULL);
/*
* If we haven't yet loaded the indirect vdevs' mappings, we
* can only read from blocks that we carefully ensure are on
* concrete vdevs (or previously-loaded indirect vdevs). So we
* can't allow the predictive prefetcher to attempt reads of other
* blocks (e.g. of the MOS's dnode object).
*/
if (!spa_indirect_vdevs_loaded(spa))
return (NULL);
/*
* As a fast path for small (single-block) files, ignore access
* to the first block.
*/
if (!have_lock && blkid == 0)
return (NULL);
if (!have_lock)
rw_enter(&zf->zf_dnode->dn_struct_rwlock, RW_READER);
/*
* A fast path for small files for which no prefetch will
* happen.
*/
maxblkid = zf->zf_dnode->dn_maxblkid;
if (maxblkid < 2) {
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
mutex_enter(&zf->zf_lock);
/*
* Find a matching prefetch stream. Depending on whether the accesses
* are block-aligned, the first block of the new access may either follow
* the last block of the previous access or be equal to it.
*/
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (blkid == zs->zs_blkid) {
break;
} else if (blkid + 1 == zs->zs_blkid) {
blkid++;
nblks--;
break;
}
}
/*
* If the access reaches the end of the file, remove the matching stream
* if one was found; if not, it is too late to create a new one now.
*/
if (end_of_access_blkid >= maxblkid) {
if (zs != NULL)
dmu_zfetch_stream_remove(zf, zs);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
/* Exit if we already prefetched this block before. */
if (nblks == 0) {
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
return (NULL);
}
if (zs == NULL) {
/*
* This access is not part of any existing stream. Create
* a new stream for it.
*/
dmu_zfetch_stream_create(zf, end_of_access_blkid);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
ZFETCHSTAT_BUMP(zfetchstat_misses);
return (NULL);
}
/*
* This access was to a block that we issued a prefetch for on
* behalf of this stream. Issue further prefetches for this stream.
*
* Normally, we start prefetching where we stopped
* prefetching last (zs_pf_blkid). But when we get our first
* hit on this stream, zs_pf_blkid == zs_blkid, we don't
* want to prefetch the block we just accessed. In this case,
* start just after the block we just accessed.
*/
pf_start = MAX(zs->zs_pf_blkid, end_of_access_blkid);
if (zs->zs_pf_blkid1 < end_of_access_blkid)
zs->zs_pf_blkid1 = end_of_access_blkid;
if (zs->zs_ipf_blkid1 < end_of_access_blkid)
zs->zs_ipf_blkid1 = end_of_access_blkid;
/*
* Double our amount of prefetched data, but don't let the
* prefetch get further ahead than zfetch_max_distance.
*/
if (fetch_data) {
max_dist_blks =
zfetch_max_distance >> zf->zf_dnode->dn_datablkshift;
/*
* Previously, we were (zs_pf_blkid - blkid) ahead. We
* want to now be double that, so read that amount again,
* plus the amount we are catching up by (i.e. the amount
* read just now).
*/
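/*
 * E.g., if the reader was at blkid 100, read 4 blocks, and we had
 * prefetched through block 109 (zs_pf_blkid == 110, i.e. 10 blocks
 * ahead of the old position), then pf_ahead_blks == 14 and, ignoring
 * the max_blks cap, the new zs_pf_blkid becomes 124, i.e. 20 blocks
 * ahead of the reader's new position at block 104.
 */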
pf_ahead_blks = zs->zs_pf_blkid - blkid + nblks;
max_blks = max_dist_blks - (pf_start - end_of_access_blkid);
pf_nblks = MIN(pf_ahead_blks, max_blks);
} else {
pf_nblks = 0;
}
zs->zs_pf_blkid = pf_start + pf_nblks;
/*
* Do the same for indirects, starting from where we stopped last,
* or where we will stop reading data blocks (and the indirects
* that point to them).
*/
ipf_start = MAX(zs->zs_ipf_blkid, zs->zs_pf_blkid);
max_dist_blks = zfetch_max_idistance >> zf->zf_dnode->dn_datablkshift;
/*
* We want to double our distance ahead of the data prefetch
* (or reader, if we are not prefetching data). Previously, we
* were (zs_ipf_blkid - blkid) ahead. To double that, we read
* that amount again, plus the amount we are catching up by
* (i.e. the amount read now + the amount of data prefetched now).
*/
pf_ahead_blks = zs->zs_ipf_blkid - blkid + nblks + pf_nblks;
max_blks = max_dist_blks - (ipf_start - zs->zs_pf_blkid);
ipf_nblks = MIN(pf_ahead_blks, max_blks);
zs->zs_ipf_blkid = ipf_start + ipf_nblks;
zs->zs_blkid = end_of_access_blkid;
/* Protect the stream from reclamation. */
zs->zs_atime = gethrtime();
zfs_refcount_add(&zs->zs_refs, NULL);
/* Count concurrent callers. */
zfs_refcount_add(&zs->zs_callers, NULL);
mutex_exit(&zf->zf_lock);
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
ZFETCHSTAT_BUMP(zfetchstat_hits);
return (zs);
}
void
dmu_zfetch_run(zstream_t *zs, boolean_t missed, boolean_t have_lock)
{
zfetch_t *zf = zs->zs_fetch;
int64_t pf_start, pf_end, ipf_start, ipf_end;
int epbs, issued;
if (missed)
zs->zs_missed = missed;
/*
* Postpone the prefetch if there are more concurrent callers.
* It happens when multiple requests are waiting for the same
* indirect block. The last one will run the prefetch for all.
*/
if (zfs_refcount_remove(&zs->zs_callers, NULL) != 0) {
/* Drop reference taken in dmu_zfetch_prepare(). */
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
return;
}
mutex_enter(&zf->zf_lock);
if (zs->zs_missed) {
pf_start = zs->zs_pf_blkid1;
pf_end = zs->zs_pf_blkid1 = zs->zs_pf_blkid;
} else {
pf_start = pf_end = 0;
}
ipf_start = MAX(zs->zs_pf_blkid1, zs->zs_ipf_blkid1);
ipf_end = zs->zs_ipf_blkid1 = zs->zs_ipf_blkid;
mutex_exit(&zf->zf_lock);
ASSERT3S(pf_start, <=, pf_end);
ASSERT3S(ipf_start, <=, ipf_end);
epbs = zf->zf_dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
ipf_start = P2ROUNDUP(ipf_start, 1 << epbs) >> epbs;
ipf_end = P2ROUNDUP(ipf_end, 1 << epbs) >> epbs;
ASSERT3S(ipf_start, <=, ipf_end);
issued = pf_end - pf_start + ipf_end - ipf_start;
if (issued > 1) {
/* More references on top of taken in dmu_zfetch_prepare(). */
zfs_refcount_add_many(&zs->zs_refs, issued - 1, NULL);
} else if (issued == 0) {
/* Some other thread has done our work, so drop the ref. */
if (zfs_refcount_remove(&zs->zs_refs, NULL) == 0)
dmu_zfetch_stream_fini(zs);
return;
}
if (!have_lock)
rw_enter(&zf->zf_dnode->dn_struct_rwlock, RW_READER);
issued = 0;
for (int64_t blk = pf_start; blk < pf_end; blk++) {
issued += dbuf_prefetch_impl(zf->zf_dnode, 0, blk,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH,
dmu_zfetch_stream_done, zs);
}
for (int64_t iblk = ipf_start; iblk < ipf_end; iblk++) {
issued += dbuf_prefetch_impl(zf->zf_dnode, 1, iblk,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH,
dmu_zfetch_stream_done, zs);
}
if (!have_lock)
rw_exit(&zf->zf_dnode->dn_struct_rwlock);
if (issued)
ZFETCHSTAT_ADD(zfetchstat_io_issued, issued);
}
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data,
boolean_t missed, boolean_t have_lock)
{
zstream_t *zs;
zs = dmu_zfetch_prepare(zf, blkid, nblks, fetch_data, have_lock);
if (zs)
dmu_zfetch_run(zs, missed, have_lock);
}
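/*
 * Sketch of the split prepare/run usage (illustrative only; variable
 * names here are hypothetical). A caller that wants the prediction
 * recorded before it blocks on its own reads, with the prefetch I/O
 * issued only afterwards (e.g. the dbuf array hold path), would do
 * roughly:
 *
 *	zstream_t *zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
 *	    read, B_TRUE);
 *	... issue and wait for the reads themselves ...
 *	if (zs != NULL)
 *		dmu_zfetch_run(zs, missed, B_TRUE);
 */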
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_prefetch, zfs_prefetch_, disable, INT, ZMOD_RW,
"Disable all ZFS prefetching");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_streams, UINT, ZMOD_RW,
"Max number of streams per zfetch");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, min_sec_reap, UINT, ZMOD_RW,
"Min time before stream reclaim");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_distance, UINT, ZMOD_RW,
"Max bytes to prefetch per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, max_idistance, UINT, ZMOD_RW,
"Max bytes to prefetch indirects for per stream");
ZFS_MODULE_PARAM(zfs_prefetch, zfetch_, array_rd_sz, ULONG, ZMOD_RW,
"Number of bytes in a array_read");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dnode.c b/sys/contrib/openzfs/module/zfs/dnode.c
index 7044c1fc6342..8e55d5447975 100644
--- a/sys/contrib/openzfs/module/zfs/dnode.c
+++ b/sys/contrib/openzfs/module/zfs/dnode.c
@@ -1,2600 +1,2597 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_allocate", KSTAT_DATA_UINT64 },
{ "dnode_reallocate", KSTAT_DATA_UINT64 },
{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
{ "dnode_move_special", KSTAT_DATA_UINT64 },
{ "dnode_move_handle", KSTAT_DATA_UINT64 },
{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
{ "dnode_move_active", KSTAT_DATA_UINT64 },
};
static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;
static dnode_phys_t dnode_phys_zero __maybe_unused;
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
#ifdef _KERNEL
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif /* _KERNEL */
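/*
 * AVL comparator for a dnode's dn_dbufs tree: dbufs are ordered by
 * (level, blkid). A synthetic entry in the DB_SEARCH state (used only
 * as a lookup key, e.g. when searching for the start of a block range)
 * sorts before any real dbuf with the same level and blkid.
 */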
static int
dbuf_compare(const void *x1, const void *x2)
{
const dmu_buf_impl_t *d1 = x1;
const dmu_buf_impl_t *d2 = x2;
int cmp = TREE_CMP(d1->db_level, d2->db_level);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(d1->db_blkid, d2->db_blkid);
if (likely(cmp))
return (cmp);
if (d1->db_state == DB_SEARCH) {
ASSERT3S(d2->db_state, !=, DB_SEARCH);
return (-1);
} else if (d2->db_state == DB_SEARCH) {
ASSERT3S(d1->db_state, !=, DB_SEARCH);
return (1);
}
return (TREE_PCMP(d1, d2));
}
-/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
dnode_t *dn = arg;
- int i;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
cv_init(&dn->dn_nodnholds, NULL, CV_DEFAULT, NULL);
/*
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
bzero(&dn->dn_next_type[0], sizeof (dn->dn_next_type));
bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
- for (i = 0; i < TXG_SIZE; i++) {
+ for (int i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
dn->dn_moved = 0;
return (0);
}
-/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
- int i;
+ (void) unused;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
cv_destroy(&dn->dn_nodnholds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
- for (i = 0; i < TXG_SIZE; i++) {
+ for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
}
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
ASSERT3P(dn->dn_bonus, ==, NULL);
ASSERT(!dn->dn_have_spill);
ASSERT3P(dn->dn_zio, ==, NULL);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
avl_destroy(&dn->dn_dbufs);
}
void
dnode_init(void)
{
ASSERT(dnode_cache == NULL);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
kmem_cache_set_move(dnode_cache, dnode_move);
dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dnode_ksp != NULL) {
dnode_ksp->ks_data = &dnode_stats;
kstat_install(dnode_ksp);
}
}
void
dnode_fini(void)
{
if (dnode_ksp != NULL) {
kstat_delete(dnode_ksp);
dnode_ksp = NULL;
}
kmem_cache_destroy(dnode_cache);
dnode_cache = NULL;
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
int drop_struct_lock = FALSE;
ASSERT(dn->dn_phys);
ASSERT(dn->dn_objset);
ASSERT(dn->dn_handle->dnh_dnode == dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
return;
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
}
ASSERT3U(dn->dn_nlevels, <=, 30);
ASSERT(DMU_OT_IS_VALID(dn->dn_type));
ASSERT3U(dn->dn_nblkptr, >=, 1);
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
ASSERT3U(dn->dn_datablksz, ==,
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
dn->dn_bonuslen, <=, max_bonuslen);
for (i = 0; i < TXG_SIZE; i++) {
ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
}
}
if (dn->dn_phys->dn_type != DMU_OT_NONE)
ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
if (dn->dn_dbuf != NULL) {
ASSERT3P(dn->dn_phys, ==,
(dnode_phys_t *)dn->dn_dbuf->db.db_data +
(dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
uint64_t *buf64 = (void*)&dnp->dn_blkptr;
int i;
if (dnp->dn_type == DMU_OT_NONE) {
bzero(dnp, sizeof (dnode_phys_t));
return;
}
dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
dnp->dn_used = BSWAP_64(dnp->dn_used);
/*
* dn_nblkptr is only one byte, so it's OK to read it in either
* byte order. We can't read dn_bonuslen.
*/
ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
buf64[i] = BSWAP_64(buf64[i]);
/*
* OK to check dn_bonuslen for zero, because it won't matter if
* we have the wrong byte order. This is necessary because the
* dnode dnode is smaller than a regular dnode.
*/
if (dnp->dn_bonuslen != 0) {
/*
* Note that the bonus length calculated here may be
* longer than the actual bonus buffer. This is because
* we always put the bonus buffer after the last block
* pointer (instead of packing it against the end of the
* dnode buffer).
*/
int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
int slots = dnp->dn_extra_slots + 1;
size_t len = DN_SLOTS_TO_BONUSLEN(slots) - off;
dmu_object_byteswap_t byteswap;
ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
}
/* Swap SPILL block if we have one */
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
}
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
dnode_byteswap(dnp);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE)
i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
}
}
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t));
if (newsize < dn->dn_bonuslen) {
/* clear any data after the end of the new size */
size_t diff = dn->dn_bonuslen - newsize;
char *data_end = ((char *)dn->dn_bonus->db.db_data) + newsize;
bzero(data_end, diff);
}
dn->dn_bonuslen = newsize;
if (newsize == 0)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
else
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg & TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_have_spill = B_FALSE;
}
static void
dnode_setdblksz(dnode_t *dn, int size)
{
ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
dn->dn_datablksz = size;
dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn;
dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
dn->dn_moved = 0;
/*
* Defer setting dn_objset until the dnode is ready to be a candidate
* for the dnode_move() callback.
*/
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_handle = dnh;
dn->dn_phys = dnp;
if (dnp->dn_datablkszsec) {
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
} else {
dn->dn_datablksz = 0;
dn->dn_datablkszsec = 0;
dn->dn_datablkshift = 0;
}
dn->dn_indblkshift = dnp->dn_indblkshift;
dn->dn_nlevels = dnp->dn_nlevels;
dn->dn_type = dnp->dn_type;
dn->dn_nblkptr = dnp->dn_nblkptr;
dn->dn_checksum = dnp->dn_checksum;
dn->dn_compress = dnp->dn_compress;
dn->dn_bonustype = dnp->dn_bonustype;
dn->dn_bonuslen = dnp->dn_bonuslen;
dn->dn_num_slots = dnp->dn_extra_slots + 1;
dn->dn_maxblkid = dnp->dn_maxblkid;
dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
dn->dn_id_flags = 0;
dmu_zfetch_init(&dn->dn_zfetch, dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
mutex_enter(&os->os_lock);
/*
* Exclude special dnodes from os_dnodes so an empty os_dnodes
* signifies that the special dnodes have no references from
* their children (the entries in os_dnodes). This allows
* dnode_destroy() to easily determine if the last child has
* been removed and then complete eviction of the objset.
*/
if (!DMU_OBJECT_IS_SPECIAL(object))
list_insert_head(&os->os_dnodes, dn);
membar_producer();
/*
* Everything else must be valid before assigning dn_objset
* makes the dnode eligible for dnode_move().
*/
dn->dn_objset = os;
dnh->dnh_dnode = dn;
mutex_exit(&os->os_lock);
arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
return (dn);
}
/*
* Caller must be holding the dnode handle, which is released upon return.
*/
static void
dnode_destroy(dnode_t *dn)
{
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
list_remove(&os->os_dnodes, dn);
complete_os_eviction =
list_is_empty(&os->os_dnodes) &&
list_link_active(&os->os_evicting_node);
}
mutex_exit(&os->os_lock);
/* the dnode can no longer move, so we can release the handle */
if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
zrl_remove(&dn->dn_handle->dnh_zrlock);
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
}
dn->dn_zio = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
if (complete_os_eviction)
dmu_objset_evict_done(os);
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int i;
ASSERT3U(dn_slots, >, 0);
ASSERT3U(dn_slots << DNODE_SHIFT, <=,
spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (blocksize == 0)
blocksize = 1 << zfs_default_bs;
else
blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
if (ibs == 0)
ibs = zfs_default_ibs;
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
dn->dn_objset, (u_longlong_t)dn->dn_object,
(u_longlong_t)tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(dn->dn_maxblkid);
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
}
dn->dn_type = ot;
dnode_setdblksz(dn, blocksize);
dn->dn_indblkshift = ibs;
dn->dn_nlevels = 1;
dn->dn_num_slots = dn_slots;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
dn->dn_nblkptr = 1;
else {
dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
}
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_dirty_txg = 0;
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
dnode_setdirty(dn, tx);
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots,
boolean_t keep_spill, dmu_tx_t *tx)
{
int nblkptr;
ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
ASSERT0(blocksize % SPA_MINBLOCKSIZE);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
ASSERT(tx->tx_txg != 0);
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
dnode_free_interior_slots(dn);
DNODE_STAT_BUMP(dnode_reallocate);
/* clean up any unreferenced dbufs */
dnode_evict_dbufs(dn);
dn->dn_id_flags = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
/* change blocksize */
ASSERT0(dn->dn_maxblkid);
ASSERT(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
dnode_block_freed(dn, 0));
dnode_setdblksz(dn, blocksize);
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = bonuslen;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
nblkptr = 1;
else
nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
if (dn->dn_bonustype != bonustype)
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = bonustype;
if (dn->dn_nblkptr != nblkptr)
dn->dn_next_nblkptr[tx->tx_txg & TXG_MASK] = nblkptr;
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR && !keep_spill) {
dbuf_rm_spill(dn, tx);
dnode_rm_spill(dn, tx);
}
rw_exit(&dn->dn_struct_rwlock);
/* change type */
dn->dn_type = ot;
/* change bonus size and type */
mutex_enter(&dn->dn_mtx);
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_num_slots = dn_slots;
dn->dn_nblkptr = nblkptr;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
/* fix up the bonus db_size */
if (dn->dn_bonus) {
dn->dn_bonus->db.db_size =
DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
}
dn->dn_allocated_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
}
#ifdef _KERNEL
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
int i;
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
/* Copy fields. */
ndn->dn_objset = odn->dn_objset;
ndn->dn_object = odn->dn_object;
ndn->dn_dbuf = odn->dn_dbuf;
ndn->dn_handle = odn->dn_handle;
ndn->dn_phys = odn->dn_phys;
ndn->dn_type = odn->dn_type;
ndn->dn_bonuslen = odn->dn_bonuslen;
ndn->dn_bonustype = odn->dn_bonustype;
ndn->dn_nblkptr = odn->dn_nblkptr;
ndn->dn_checksum = odn->dn_checksum;
ndn->dn_compress = odn->dn_compress;
ndn->dn_nlevels = odn->dn_nlevels;
ndn->dn_indblkshift = odn->dn_indblkshift;
ndn->dn_datablkshift = odn->dn_datablkshift;
ndn->dn_datablkszsec = odn->dn_datablkszsec;
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
bcopy(&odn->dn_next_type[0], &ndn->dn_next_type[0],
sizeof (odn->dn_next_type));
bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
sizeof (odn->dn_next_nblkptr));
bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
sizeof (odn->dn_next_nlevels));
bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
sizeof (odn->dn_next_indblkshift));
bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
sizeof (odn->dn_next_bonustype));
bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
sizeof (odn->dn_rm_spillblk));
bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
sizeof (odn->dn_next_bonuslen));
bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
sizeof (odn->dn_next_blksz));
bcopy(&odn->dn_next_maxblkid[0], &ndn->dn_next_maxblkid[0],
sizeof (odn->dn_next_maxblkid));
for (i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
ndn->dn_oldused = odn->dn_oldused;
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
dmu_zfetch_init(&ndn->dn_zfetch, ndn);
/*
* Update back pointers. Updating the handle fixes the back pointer of
* every descendant dbuf as well as the bonus dbuf.
*/
ASSERT(ndn->dn_handle->dnh_dnode == odn);
ndn->dn_handle->dnh_dnode = ndn;
/*
* Invalidate the original dnode by clearing all of its back pointers.
*/
odn->dn_dbuf = NULL;
odn->dn_handle = NULL;
avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
odn->dn_bonus = NULL;
dmu_zfetch_fini(&odn->dn_zfetch);
/*
* Set the low bit of the objset pointer to ensure that dnode_move()
* recognizes the dnode as invalid in any subsequent callback.
*/
POINTER_INVALIDATE(&odn->dn_objset);
/*
* Satisfy the destructor.
*/
for (i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
odn->dn_free_ranges[i] = NULL;
odn->dn_next_nlevels[i] = 0;
odn->dn_next_indblkshift[i] = 0;
odn->dn_next_bonustype[i] = 0;
odn->dn_rm_spillblk[i] = 0;
odn->dn_next_bonuslen[i] = 0;
odn->dn_next_blksz[i] = 0;
}
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
odn->dn_dirty_txg = 0;
odn->dn_dirtyctx = 0;
odn->dn_dirtyctx_firstset = NULL;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
/*
* Mark the dnode.
*/
ndn->dn_moved = 1;
odn->dn_moved = (uint8_t)-1;
}
-/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
dnode_t *odn = buf, *ndn = newbuf;
objset_t *os;
int64_t refcount;
uint32_t dbufs;
/*
* The dnode is on the objset's list of known dnodes if the objset
* pointer is valid. We set the low bit of the objset pointer when
* freeing the dnode to invalidate it, and the memory patterns written
* by kmem (baddcafe and deadbeef) set at least one of the two low bits.
* A newly created dnode sets the objset pointer last of all to indicate
* that the dnode is known and in a valid state to be moved by this
* function.
*/
os = odn->dn_objset;
if (!POINTER_IS_VALID(os)) {
DNODE_STAT_BUMP(dnode_move_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* Ensure that the objset does not go away during the move.
*/
rw_enter(&os_lock, RW_WRITER);
if (os != odn->dn_objset) {
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* If the dnode is still valid, then so is the objset. We know that no
* valid objset can be freed while we hold os_lock, so we can safely
* ensure that the objset remains in use.
*/
mutex_enter(&os->os_lock);
/*
* Recheck the objset pointer in case the dnode was removed just before
* acquiring the lock.
*/
if (os != odn->dn_objset) {
mutex_exit(&os->os_lock);
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* At this point we know that as long as we hold os->os_lock, the dnode
* cannot be freed and fields within the dnode can be safely accessed.
* The objset listing this dnode cannot go away as long as this dnode is
* on its list.
*/
rw_exit(&os_lock);
if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_special);
return (KMEM_CBRC_NO);
}
ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
/*
* Lock the dnode handle to prevent the dnode from obtaining any new
* holds. This also prevents the descendant dbufs and the bonus dbuf
* from accessing the dnode, so that we can discount their holds. The
* handle is safe to access because we know that while the dnode cannot
* go away, neither can its handle. Once we hold dnh_zrlock, we can
* safely move any dnode referenced only by dbufs.
*/
if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_handle);
return (KMEM_CBRC_LATER);
}
/*
* Ensure a consistent view of the dnode's holds and the dnode's dbufs.
* We need to guarantee that there is a hold for every dbuf in order to
* determine whether the dnode is actively referenced. Falsely matching
* a dbuf to an active hold would lead to an unsafe move. It's possible
* that a thread already having an active dnode hold is about to add a
* dbuf, and we can't compare hold and dbuf counts while the add is in
* progress.
*/
if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_rwlock);
return (KMEM_CBRC_LATER);
}
/*
* A dbuf may be removed (evicted) without an active dnode hold. In that
* case, the dbuf count is decremented under the handle lock before the
* dbuf's hold is released. This order ensures that if we count the hold
* after the dbuf is removed but before its hold is released, we will
* treat the unmatched hold as active and exit safely. If we count the
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = DN_DBUFS_COUNT(odn);
/* We can't have more dbufs than dnode holds. */
ASSERT3U(dbufs, <=, refcount);
DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
uint32_t, dbufs);
if (refcount > dbufs) {
rw_exit(&odn->dn_struct_rwlock);
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_active);
return (KMEM_CBRC_LATER);
}
rw_exit(&odn->dn_struct_rwlock);
/*
* At this point we know that anyone with a hold on the dnode is not
* actively referencing it. The dnode is known and in a valid state to
* move. We're holding the locks needed to execute the critical section.
*/
dnode_move_impl(odn, ndn);
list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == DN_DBUFS_COUNT(ndn));
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
return (KMEM_CBRC_YES);
}
#endif /* _KERNEL */
static void
dnode_slots_hold(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
zrl_add(&dnh->dnh_zrlock);
}
}
static void
dnode_slots_rele(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (zrl_is_locked(&dnh->dnh_zrlock))
zrl_exit(&dnh->dnh_zrlock);
else
zrl_remove(&dnh->dnh_zrlock);
}
}
static int
dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (!zrl_tryenter(&dnh->dnh_zrlock)) {
for (int j = idx; j < i; j++) {
dnh = &children->dnc_children[j];
zrl_exit(&dnh->dnh_zrlock);
}
return (0);
}
}
return (1);
}
static void
dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnh->dnh_dnode = ptr;
}
}
static boolean_t
dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
/*
* If all dnode slots are either already free or
* evictable, return B_TRUE.
*/
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnode_t *dn = dnh->dnh_dnode;
if (dn == DN_SLOT_FREE) {
continue;
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
zfs_refcount_is_zero(&dn->dn_holds) &&
!DNODE_IS_DIRTY(dn));
mutex_exit(&dn->dn_mtx);
if (!can_free)
return (B_FALSE);
else
continue;
} else {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void
dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
dnode_destroy(dnh->dnh_dnode);
dnh->dnh_dnode = DN_SLOT_FREE;
}
}
}
void
dnode_free_interior_slots(dnode_t *dn)
{
dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
int idx = (dn->dn_object & (epb - 1)) + 1;
int slots = dn->dn_num_slots - 1;
if (slots == 0)
return;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
while (!dnode_slots_tryenter(children, idx, slots)) {
DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
cond_resched();
}
dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
dnode_slots_rele(children, idx, slots);
}
void
dnode_special_close(dnode_handle_t *dnh)
{
dnode_t *dn = dnh->dnh_dnode;
/*
* Ensure dnode_rele_and_unlock() has released dn_mtx, after final
* zfs_refcount_remove()
*/
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_count(&dn->dn_holds) > 0)
cv_wait(&dn->dn_nodnholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 0);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
zrl_add(&dnh->dnh_zrlock);
dnode_destroy(dn); /* implicit zrl_remove() */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = NULL;
}
void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
dnode_handle_t *dnh)
{
dnode_t *dn;
zrl_init(&dnh->dnh_zrlock);
VERIFY3U(1, ==, zrl_tryenter(&dnh->dnh_zrlock));
dn = dnode_create(os, dnp, NULL, object, dnh);
DNODE_VERIFY(dn);
zrl_exit(&dnh->dnh_zrlock);
}
static void
dnode_buf_evict_async(void *dbu)
{
dnode_children_t *dnc = dbu;
DNODE_STAT_BUMP(dnode_buf_evict);
for (int i = 0; i < dnc->dnc_count; i++) {
dnode_handle_t *dnh = &dnc->dnc_children[i];
dnode_t *dn;
/*
* The dnode handle lock guards against the dnode moving to
* another valid address, so there is no need here to guard
* against changes to or from NULL.
*/
if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
continue;
}
zrl_add(&dnh->dnh_zrlock);
dn = dnh->dnh_dnode;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
}
kmem_free(dnc, sizeof (dnode_children_t) +
dnc->dnc_count * sizeof (dnode_handle_t));
}
/*
* When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
* to ensure the hole at the specified object offset is large enough to
* hold the dnode being created. The slots parameter is also used to ensure
* a dnode does not span multiple dnode blocks. In both of these cases, if
* a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
* are only possible when using DNODE_MUST_BE_FREE.
*
* If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
* dnode_hold_impl() will check if the requested dnode is already consumed
* as an extra dnode slot by a large dnode, in which case it returns
* ENOENT.
*
* If the DNODE_DRY_RUN flag is set, we don't actually hold the dnode, just
* return whether the hold would succeed or not. tag and dnp should be set to
* NULL in this case.
*
* errors:
* EINVAL - Invalid object number or flags.
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
* EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
* - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
* - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
* ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
* - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
* EIO - I/O error when reading the meta dnode dbuf.
*
* succeeds even for free dnodes.
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
int type;
uint64_t blk;
dnode_t *mdn, *dn;
dmu_buf_impl_t *db;
dnode_children_t *dnc;
dnode_phys_t *dn_block;
dnode_handle_t *dnh;
ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
IMPLY(flag & DNODE_DRY_RUN, (tag == NULL) && (dnp == NULL));
/*
* If you are holding the spa config lock as writer, you shouldn't
* be asking the DMU to do *anything* unless it's the root pool
* which may require us to read from the root filesystem while
* holding some (not all) of the locks as writer.
*/
ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
object == DMU_PROJECTUSED_OBJECT) {
if (object == DMU_USERUSED_OBJECT)
dn = DMU_USERUSED_DNODE(os);
else if (object == DMU_GROUPUSED_OBJECT)
dn = DMU_GROUPUSED_DNODE(os);
else
dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
/* Don't actually hold if dry run, just return 0 */
if (!(flag & DNODE_DRY_RUN)) {
(void) zfs_refcount_add(&dn->dn_holds, tag);
*dnp = dn;
}
return (0);
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
DNODE_VERIFY(mdn);
if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
rw_enter(&mdn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
db = dbuf_hold(mdn, blk, FTAG);
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL) {
DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
return (SET_ERROR(EIO));
}
/*
* We do not need to decrypt to read the dnode so it doesn't matter
* if we get the encrypted or decrypted version.
*/
err = dbuf_read(db, NULL, DB_RF_CANFAIL |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (err) {
DNODE_STAT_BUMP(dnode_hold_dbuf_read);
dbuf_rele(db, FTAG);
return (err);
}
ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
epb = db->db.db_size >> DNODE_SHIFT;
idx = object & (epb - 1);
dn_block = (dnode_phys_t *)db->db.db_data;
ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
dnc = dmu_buf_get_user(&db->db);
dnh = NULL;
if (dnc == NULL) {
dnode_children_t *winner;
int skip = 0;
dnc = kmem_zalloc(sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t), KM_SLEEP);
dnc->dnc_count = epb;
dnh = &dnc->dnc_children[0];
/* Initialize dnode slot status from dnode_phys_t */
for (int i = 0; i < epb; i++) {
zrl_init(&dnh[i].dnh_zrlock);
if (skip) {
skip--;
continue;
}
if (dn_block[i].dn_type != DMU_OT_NONE) {
int interior = dn_block[i].dn_extra_slots;
dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
dnode_set_slots(dnc, i + 1, interior,
DN_SLOT_INTERIOR);
skip = interior;
} else {
dnh[i].dnh_dnode = DN_SLOT_FREE;
skip = 0;
}
}
dmu_buf_init_user(&dnc->dnc_dbu, NULL,
dnode_buf_evict_async, NULL);
winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
if (winner != NULL) {
for (int i = 0; i < epb; i++)
zrl_destroy(&dnh[i].dnh_zrlock);
kmem_free(dnc, sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t));
dnc = winner;
}
}
ASSERT(dnc->dnc_count == epb);
if (flag & DNODE_MUST_BE_ALLOCATED) {
slots = 1;
dnode_slots_hold(dnc, idx, slots);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
DNODE_STAT_BUMP(dnode_hold_alloc_interior);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
DNODE_STAT_BUMP(dnode_hold_alloc_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
} else {
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
cond_resched();
}
/*
* Someone else won the race and called dnode_create()
* after we checked DN_SLOT_IS_PTR() above but before
* we acquired the lock.
*/
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
DNODE_STAT_BUMP(dnode_hold_alloc_hits);
} else if (flag & DNODE_MUST_BE_FREE) {
if (idx + slots - 1 >= DNODES_PER_BLOCK) {
DNODE_STAT_BUMP(dnode_hold_free_overflow);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_hold(dnc, idx, slots);
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_rele(dnc, idx, slots);
while (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
cond_resched();
}
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
/*
* Allocated but otherwise free dnodes which would
* be in the interior of a multi-slot dnode need
* to be freed. Single slot dnodes can be safely
* re-purposed as a performance optimization.
*/
if (slots > 1)
dnode_reclaim_slots(dnc, idx + 1, slots - 1);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
mutex_enter(&dn->dn_mtx);
if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
}
/* Don't actually hold if dry run, just return 0 */
if (flag & DNODE_DRY_RUN) {
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (0);
}
dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
DNODE_STAT_BUMP(dnode_hold_free_hits);
} else {
dbuf_rele(db, FTAG);
return (SET_ERROR(EINVAL));
}
ASSERT0(dn->dn_free_txg);
if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);
mutex_exit(&dn->dn_mtx);
/* Now we can rely on the hold to prevent the dnode from moving. */
dnode_slots_rele(dnc, idx, slots);
DNODE_VERIFY(dn);
ASSERT3P(dnp, !=, NULL);
ASSERT3P(dn->dn_dbuf, ==, db);
ASSERT3U(dn->dn_object, ==, object);
dbuf_rele(db, FTAG);
*dnp = dn;
return (0);
}
/*
* Return held dnode if the object is allocated, NULL if not.
*/
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
}
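/*
 * Editor's illustrative sketch (not part of the original source): the usual
 * hold/release pattern around dnode_hold(). The helper name
 * example_read_dnode_type() is hypothetical; dnode_hold(), dnode_rele() and
 * FTAG are the same calls used throughout this file.
 */
static int __maybe_unused
example_read_dnode_type(objset_t *os, uint64_t object, dmu_object_type_t *typep)
{
	dnode_t *dn;
	int err;

	/* Fails with ENOENT if the object is not allocated. */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	*typep = dn->dn_type;

	/* Drop the hold so the dnode may again be evicted or moved. */
	dnode_rele(dn, FTAG);
	return (0);
}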
/*
* Can only add a reference if there is already at least one
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
void
dnode_rele(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
}
void
dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;
refs = zfs_refcount_remove(&dn->dn_holds, tag);
if (refs == 0)
cv_broadcast(&dn->dn_nodnholds);
mutex_exit(&dn->dn_mtx);
/* dnode could get destroyed at this point, so don't use it anymore */
/*
* It's unsafe to release the last hold on a dnode by dnode_rele() or
* indirectly by dbuf_rele() while relying on the dnode handle to
* prevent the dnode from moving, since releasing the last hold could
* result in the dnode's parent dbuf evicting its dnode handles. For
* that reason anyone calling dnode_rele() or dbuf_rele() without some
* other direct or indirect hold on the dnode must first drop the dnode
* handle.
*/
ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
if (refs == 0 && db != NULL) {
/*
* Another thread could add a hold to the dnode handle in
* dnode_hold_impl() while holding the parent dbuf. Since the
* hold on the parent dbuf prevents the handle from being
* destroyed, the hold on the handle is OK. We can't yet assert
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, dnh, evicting);
}
}
/*
* Test whether we can create a dnode at the specified location.
*/
int
dnode_try_claim(objset_t *os, uint64_t object, int slots)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_FREE | DNODE_DRY_RUN,
slots, NULL, NULL));
}
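/*
 * Editor's illustrative sketch (not part of the original source): because
 * DNODE_DRY_RUN takes no hold and returns no dnode_t, dnode_try_claim() can
 * be used as a cheap predicate. The helper name below is hypothetical.
 */
static boolean_t __maybe_unused
example_can_place_large_dnode(objset_t *os, uint64_t object, int slots)
{
	/* 0 means "slots" contiguous free slots exist at "object". */
	return (dnode_try_claim(os, object, slots) == 0);
}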
/*
* Checks if the dnode contains any uncommitted dirty records.
*/
boolean_t
dnode_is_dirty(dnode_t *dn)
{
mutex_enter(&dn->dn_mtx);
for (int i = 0; i < TXG_SIZE; i++) {
if (multilist_link_active(&dn->dn_dirty_link[i])) {
mutex_exit(&dn->dn_mtx);
return (B_TRUE);
}
}
mutex_exit(&dn->dn_mtx);
return (B_FALSE);
}
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
uint64_t txg = tx->tx_txg;
if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
dsl_dataset_dirty(os->os_dsl_dataset, tx);
return;
}
DNODE_VERIFY(dn);
#ifdef ZFS_DEBUG
mutex_enter(&dn->dn_mtx);
ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
mutex_exit(&dn->dn_mtx);
#endif
/*
* Determine old uid/gid when necessary
*/
dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
multilist_t *dirtylist = &os->os_dirty_dnodes[txg & TXG_MASK];
multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
/*
* If we are already marked dirty, we're done.
*/
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
multilist_sublist_unlock(mls);
return;
}
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg & TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg & TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg & TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
(u_longlong_t)dn->dn_object, (u_longlong_t)txg);
multilist_sublist_insert_head(mls, dn);
multilist_sublist_unlock(mls);
/*
* The dnode maintains a hold on its containing dbuf as
* long as there are holds on it. Each instantiated child
* dbuf maintains a hold on the dnode. When the last child
* drops its hold, the dnode will drop its hold on the
* containing dbuf. We add a "dirty hold" here so that the
* dnode will hang around after we finish processing its
* children.
*/
VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
(void) dbuf_dirty(dn->dn_dbuf, tx);
dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
mutex_exit(&dn->dn_mtx);
return;
}
dn->dn_free_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
}
/*
* Try to change the block size for the indicated dnode. This can only
* succeed if there are no blocks allocated or dirty beyond the first block.
*/
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int err;
ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (size == 0)
size = SPA_MINBLOCKSIZE;
else
size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
if (ibs == dn->dn_indblkshift)
ibs = 0;
if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
return (0);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* Check for any allocated blocks beyond the first */
if (dn->dn_maxblkid != 0)
goto fail;
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL;
db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_exit(&dn->dn_dbufs_mtx);
goto fail;
}
}
mutex_exit(&dn->dn_dbufs_mtx);
if (ibs && dn->dn_nlevels != 1)
goto fail;
/* resize the old block */
err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
if (err == 0) {
dbuf_new_size(db, size, tx);
} else if (err != ENOENT) {
goto fail;
}
dnode_setdblksz(dn, size);
dnode_setdirty(dn, tx);
dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
if (ibs) {
dn->dn_indblkshift = ibs;
dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
}
/* release after we have fixed the blocksize in the dnode */
if (db)
dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
return (0);
fail:
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(ENOTSUP));
}
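/*
 * Editor's illustrative sketch (not part of the original source): a direct
 * call to dnode_set_blksz() on a held dnode inside an assigned transaction.
 * The 8K size is arbitrary; ENOTSUP comes back when data already exists
 * beyond the first block or the indirect level is incompatible (see above).
 */
static int __maybe_unused
example_set_blksz_8k(dnode_t *dn, dmu_tx_t *tx)
{
	/* ibs == 0 keeps the current indirect block shift. */
	return (dnode_set_blksz(dn, 8192, 0, tx));
}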
static void
dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
{
uint64_t txgoff = tx->tx_txg & TXG_MASK;
int old_nlevels = dn->dn_nlevels;
dmu_buf_impl_t *db;
list_t *list;
dbuf_dirty_record_t *new, *dr, *dr_next;
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT3U(new_nlevels, >, dn->dn_nlevels);
dn->dn_nlevels = new_nlevels;
ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
dn->dn_next_nlevels[txgoff] = new_nlevels;
/* dirty the left indirects */
db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
ASSERT(db != NULL);
new = dbuf_dirty(db, tx);
dbuf_rele(db, FTAG);
/* transfer the dirty records to the new indirect */
mutex_enter(&dn->dn_mtx);
mutex_enter(&new->dt.di.dr_mtx);
list = &dn->dn_dirty_records[txgoff];
for (dr = list_head(list); dr; dr = dr_next) {
dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
IMPLY(dr->dr_dbuf == NULL, old_nlevels == 1);
if (dr->dr_dbuf == NULL ||
(dr->dr_dbuf->db_level == old_nlevels - 1 &&
dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID)) {
list_remove(&dn->dn_dirty_records[txgoff], dr);
list_insert_tail(&new->dt.di.dr_children, dr);
dr->dr_parent = new;
}
}
mutex_exit(&new->dt.di.dr_mtx);
mutex_exit(&dn->dn_mtx);
}
int
dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
{
int ret = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_nlevels == nlevels) {
ret = 0;
goto out;
} else if (nlevels < dn->dn_nlevels) {
ret = SET_ERROR(EINVAL);
goto out;
}
dnode_set_nlevels_impl(dn, nlevels, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
return (ret);
}
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read,
boolean_t force)
{
int epbs, new_nlevels;
uint64_t sz;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(have_read ?
RW_READ_HELD(&dn->dn_struct_rwlock) :
RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* if we have a read-lock, check to see if we need to do any work
* before upgrading to a write-lock.
*/
if (have_read) {
if (blkid <= dn->dn_maxblkid)
return;
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
}
/*
* Raw sends (indicated by the force flag) require that we take the
* given blkid even if the value is lower than the current value.
*/
if (!force && blkid <= dn->dn_maxblkid)
goto out;
/*
* We use the (otherwise unused) top bit of dn_next_maxblkid[txgoff]
* to indicate that this field is set. This allows us to set the
* maxblkid to 0 on an existing object in dnode_sync().
*/
dn->dn_maxblkid = blkid;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] =
blkid | DMU_NEXT_MAXBLKID_SET;
/*
* Compute the number of levels necessary to support the new maxblkid.
* Raw sends will ensure nlevels is set correctly for us.
*/
new_nlevels = 1;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (sz = dn->dn_nblkptr;
sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
new_nlevels++;
ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
if (!force) {
if (new_nlevels > dn->dn_nlevels)
dnode_set_nlevels_impl(dn, new_nlevels, tx);
} else {
ASSERT3U(dn->dn_nlevels, >=, new_nlevels);
}
out:
if (have_read)
rw_downgrade(&dn->dn_struct_rwlock);
}
static void
dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
if (db != NULL) {
dmu_buf_will_dirty(&db->db, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Dirty all the in-core level-1 dbufs in the range specified by start_blkid
* and end_blkid.
*/
static void
dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db;
avl_index_t where;
db_search = kmem_zalloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
for (;;) {
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
if (db == NULL || db->db_level != 1 ||
db->db_blkid >= end_blkid) {
break;
}
/*
* Setup the next blkid we want to search for.
*/
db_search->db_blkid = db->db_blkid + 1;
ASSERT3U(db->db_blkid, >=, start_blkid);
/*
* If the dbuf transitions to DB_EVICTING while we're trying
* to dirty it, then we will be unable to discover it in
* the dbuf hash table. This will result in a call to
* dbuf_create() which needs to acquire the dn_dbufs_mtx
* lock. To avoid a deadlock, we drop the lock before
* dirtying the level-1 dbuf.
*/
mutex_exit(&dn->dn_dbufs_mtx);
dnode_dirty_l1(dn, db->db_blkid, tx);
mutex_enter(&dn->dn_dbufs_mtx);
}
#ifdef ZFS_DEBUG
/*
* Walk all the in-core level-1 dbufs and verify they have been dirtied.
*/
db_search->db_level = 1;
db_search->db_blkid = start_blkid + 1;
db_search->db_state = DB_SEARCH;
db = avl_find(&dn->dn_dbufs, db_search, &where);
if (db == NULL)
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_level != 1 || db->db_blkid >= end_blkid)
break;
if (db->db_state != DB_EVICTING)
ASSERT(db->db_dirtycnt > 0);
}
#endif
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
void
dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag)
{
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
* initialize the objset.
*/
if (dn->dn_dirtyctx == DN_UNDIRTIED) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
if (ds != NULL) {
rrw_enter(&ds->ds_bp_rwlock, RW_READER, tag);
}
if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
if (dmu_tx_is_syncing(tx))
dn->dn_dirtyctx = DN_DIRTY_SYNC;
else
dn->dn_dirtyctx = DN_DIRTY_OPEN;
dn->dn_dirtyctx_firstset = tag;
}
if (ds != NULL) {
rrw_exit(&ds->ds_bp_rwlock, tag);
}
}
}
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
uint64_t blkoff, blkid, nblks;
int blksz, blkshift, head, tail;
int trunc = FALSE;
int epbs;
blksz = dn->dn_datablksz;
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
/*
* First, block align the region to free:
*/
if (ISP2(blksz)) {
head = P2NPHASE(off, blksz);
blkoff = P2PHASE(off, blksz);
if ((off >> blkshift) > dn->dn_maxblkid)
return;
} else {
ASSERT(dn->dn_maxblkid == 0);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
*/
blkid = 0;
nblks = 1;
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_dirty_l1(dn, 0, tx);
rw_exit(&dn->dn_struct_rwlock);
}
goto done;
} else if (off >= blksz) {
/* Freeing past end-of-data */
return;
} else {
/* Freeing part of the block. */
head = blksz - off;
ASSERT3U(head, >, 0);
}
blkoff = off;
}
/* zero out any partial block data at the start of the range */
if (head) {
int res;
ASSERT3U(blkoff + head, ==, blksz);
if (len < head)
head = len;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
TRUE, FALSE, FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
caddr_t data;
boolean_t dirty;
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER,
FTAG);
/* don't dirty if it isn't on disk and isn't dirty */
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, dblt, FTAG);
if (dirty) {
dmu_buf_will_dirty(&db->db, tx);
data = db->db.db_data;
bzero(data + blkoff, head);
}
dbuf_rele(db, FTAG);
}
off += head;
len -= head;
}
/* If the range was less than one block, we're done */
if (len == 0)
return;
/* If the remaining range is past end of file, we're done */
if ((off >> blkshift) > dn->dn_maxblkid)
return;
ASSERT(ISP2(blksz));
if (trunc)
tail = 0;
else
tail = P2PHASE(len, blksz);
ASSERT0(P2PHASE(off, blksz));
/* zero out any partial block data at the end of the range */
if (tail) {
int res;
if (len < tail)
tail = len;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
res = dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
TRUE, FALSE, FTAG, &db);
rw_exit(&dn->dn_struct_rwlock);
if (res == 0) {
boolean_t dirty;
/* don't dirty if not on disk and not dirty */
db_lock_type_t type = dmu_buf_lock_parent(db, RW_READER,
FTAG);
dirty = !list_is_empty(&db->db_dirty_records) ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr));
dmu_buf_unlock_parent(db, type, FTAG);
if (dirty) {
dmu_buf_will_dirty(&db->db, tx);
bzero(db->db.db_data, tail);
}
dbuf_rele(db, FTAG);
}
len -= tail;
}
/* If the range did not include a full block, we are done */
if (len == 0)
return;
ASSERT(IS_P2ALIGNED(off, blksz));
ASSERT(trunc || IS_P2ALIGNED(len, blksz));
blkid = off >> blkshift;
nblks = len >> blkshift;
if (trunc)
nblks += 1;
/*
* Dirty all the indirect blocks in this range. Note that only
* the first and last indirect blocks can actually be written
* (if they were partially freed) -- they must be dirtied, even if
* they do not exist on disk yet. The interior blocks will
* be freed by free_children(), so they will not actually be written.
* Even though these interior blocks will not be written, we
* dirty them for two reasons:
*
* - It ensures that the indirect blocks remain in memory until
* syncing context. (They have already been prefetched by
* dmu_tx_hold_free(), so we don't have to worry about reading
* them serially here.)
*
* - The dirty space accounting will put pressure on the txg sync
* mechanism to begin syncing, and to delay transactions if there
* is a large amount of freeing. Even though these indirect
* blocks will not be written, we could need to write the same
* amount of space if we copy the freed BPs into deadlists.
*/
if (dn->dn_nlevels > 1) {
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
uint64_t first, last;
first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx);
if (trunc)
last = dn->dn_maxblkid >> epbs;
else
last = (blkid + nblks - 1) >> epbs;
if (last != first)
dnode_dirty_l1(dn, last, tx);
dnode_dirty_l1range(dn, first, last, tx);
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
for (uint64_t i = first + 1; i < last; i++) {
/*
* Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of
* level-0-equivalent bytes.
*/
uint64_t ibyte = i << shift;
int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0);
i = ibyte >> shift;
if (i >= last)
break;
/*
* Normally we should not see an error, either
* from dnode_next_offset() or dbuf_hold_level()
* (except for ESRCH from dnode_next_offset).
* If there is an i/o error, then when we read
* this block in syncing context, it will use
* ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
* to the "failmode" property. dnode_next_offset()
* doesn't have a flag to indicate MUSTSUCCEED.
*/
if (err != 0)
break;
dnode_dirty_l1(dn, i, tx);
}
rw_exit(&dn->dn_struct_rwlock);
}
done:
/*
* Add this range to the dnode range list.
* We will finish up this free operation in the syncing phase.
*/
mutex_enter(&dn->dn_mtx);
{
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] == NULL) {
dn->dn_free_ranges[txgoff] = range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0);
}
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
(u_longlong_t)blkid, (u_longlong_t)nblks,
(u_longlong_t)tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
dnode_setdirty(dn, tx);
}
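/*
 * Editor's worked example (not part of the original source), assuming a 128K
 * (0x20000) data block size: dnode_free_range(dn, 0x19000, 0x30000, tx)
 * covers bytes 0x19000-0x48fff. The head pass zeroes 0x19000-0x1ffff in
 * block 0, the tail pass zeroes 0x40000-0x48fff in block 2, and only block 1
 * (the single fully covered block) is recorded in dn_free_ranges to be freed
 * during the syncing phase.
 */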
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
int i;
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
void *dp = spa_get_dsl(dn->dn_objset->os_spa);
int i;
if (blkid == DMU_BONUS_BLKID)
return (FALSE);
/*
* If we're in the process of opening the pool, dp will not be
* set yet, but there shouldn't be anything dirty.
*/
if (dp == NULL)
return (FALSE);
if (dn->dn_free_txg)
return (TRUE);
if (blkid == DMU_SPILL_BLKID)
return (dnode_spill_freed(dn));
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_free_ranges[i] != NULL &&
range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
uint64_t space;
dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
dn, dn->dn_phys,
(u_longlong_t)dn->dn_phys->dn_used,
(longlong_t)delta);
mutex_enter(&dn->dn_mtx);
space = DN_USED_BYTES(dn->dn_phys);
if (delta > 0) {
ASSERT3U(space + delta, >=, space); /* no overflow */
} else {
ASSERT3U(space, >=, -delta); /* no underflow */
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
dn->dn_phys->dn_used = space;
dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
}
mutex_exit(&dn->dn_mtx);
}
/*
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
* we find something (i.e., we don't return ESRCH) and then search back
* down the tree (narrow the search) until we reach our original search
* level.
*/
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
int lvl, uint64_t blkfill, uint64_t txg)
{
dmu_buf_impl_t *db = NULL;
void *data = NULL;
uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
uint64_t epb = 1ULL << epbs;
uint64_t minfill, maxfill;
boolean_t hole;
int i, inc, error, span;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
hole = ((flags & DNODE_FIND_HOLE) != 0);
inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
ASSERT(txg == 0 || !hole);
if (lvl == dn->dn_phys->dn_nlevels) {
error = 0;
epb = dn->dn_phys->dn_nblkptr;
data = dn->dn_phys->dn_blkptr;
} else {
uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
if (error) {
if (error != ENOENT)
return (error);
if (hole)
return (0);
/*
* This can only happen when we are searching up
* the block tree for data. We don't really need to
* adjust the offset, as we will just end up looking
* at the pointer to this block in its parent, and it's
* going to be unallocated, so we will skip over it.
*/
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL,
DB_RF_CANFAIL | DB_RF_HAVESTRUCT |
DB_RF_NO_DECRYPT | DB_RF_NOPREFETCH);
if (error) {
dbuf_rele(db, FTAG);
return (error);
}
data = db->db.db_data;
rw_enter(&db->db_rwlock, RW_READER);
}
if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
db->db_blkptr->blk_birth <= txg ||
BP_IS_HOLE(db->db_blkptr))) {
/*
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
ASSERT(dn->dn_type == DMU_OT_DNODE);
ASSERT(!(flags & DNODE_FIND_BACKWARDS));
for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
i < blkfill; i += dnp[i].dn_extra_slots + 1) {
if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
break;
}
if (i == blkfill)
error = SET_ERROR(ESRCH);
*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
(i << DNODE_SHIFT);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
span = (lvl - 1) * epbs + dn->dn_datablkshift;
minfill = 0;
maxfill = blkfill << ((lvl - 1) * epbs);
if (hole)
maxfill--;
else
minfill++;
if (span >= 8 * sizeof (*offset)) {
/* This only happens on the highest indirection level */
ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
*offset = 0;
} else {
*offset = *offset >> span;
}
for (i = BF64_GET(*offset, 0, epbs);
i >= 0 && i < epb; i += inc) {
if (BP_GET_FILL(&bp[i]) >= minfill &&
BP_GET_FILL(&bp[i]) <= maxfill &&
(hole || bp[i].blk_birth > txg))
break;
if (inc > 0 || *offset > 0)
*offset += inc;
}
if (span >= 8 * sizeof (*offset)) {
*offset = start;
} else {
*offset = *offset << span;
}
if (inc < 0) {
/* traversing backwards; position offset at the end */
ASSERT3U(*offset, <=, start);
*offset = MIN(*offset + (1ULL << span) - 1, start);
} else if (*offset < start) {
*offset = start;
}
if (i < 0 || i >= epb)
error = SET_ERROR(ESRCH);
}
if (db != NULL) {
rw_exit(&db->db_rwlock);
dbuf_rele(db, FTAG);
}
return (error);
}
/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
* DNODES_PER_BLOCK for the meta dnode, and some fraction of
* DNODES_PER_BLOCK when searching for sparse regions thereof.
*
* Examples:
*
* dnode_next_offset(dn, flags, offset, 1, 1, 0);
* Finds the next/previous hole/data in a file.
* Used in dmu_offset_next().
*
* dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
* Finds the next free/allocated dnode in an objset's meta-dnode.
* Only finds objects that have new contents since txg (ie.
* bonus buffer changes and content removal are ignored).
* Used in dmu_object_next().
*
* dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
* Finds the next L2 meta-dnode bp that's at most 1/4 full.
* Used in dmu_object_alloc().
*/
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
uint64_t initial_offset = *offset;
int lvl, maxlvl;
int error = 0;
if (!(flags & DNODE_FIND_HAVELOCK))
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = SET_ERROR(ESRCH);
goto out;
}
if (dn->dn_datablkshift == 0) {
if (*offset < dn->dn_datablksz) {
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = SET_ERROR(ESRCH);
}
goto out;
}
maxlvl = dn->dn_phys->dn_nlevels;
for (lvl = minlvl; lvl <= maxlvl; lvl++) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
if (error != ESRCH)
break;
}
while (error == 0 && --lvl >= minlvl) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
}
/*
* There's always a "virtual hole" at the end of the object, even
* if all BP's which physically exist are non-holes.
*/
if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
error = 0;
}
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
return (error);
}
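/*
 * Editor's illustrative sketch (not part of the original source): the first
 * usage listed in the comment above, searching forward for the next data
 * region of a plain object. The helper name is hypothetical; ESRCH means no
 * further data exists before the end of the object.
 */
static int __maybe_unused
example_next_data(dnode_t *dn, uint64_t *off)
{
	/* flags = 0: forward search for data; use DNODE_FIND_HOLE for holes. */
	return (dnode_next_offset(dn, 0, off, 1, 1, 0));
}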
#if defined(_KERNEL)
EXPORT_SYMBOL(dnode_hold);
EXPORT_SYMBOL(dnode_rele);
EXPORT_SYMBOL(dnode_set_nlevels);
EXPORT_SYMBOL(dnode_set_blksz);
EXPORT_SYMBOL(dnode_free_range);
EXPORT_SYMBOL(dnode_evict_dbufs);
EXPORT_SYMBOL(dnode_evict_bonus);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dnode_sync.c b/sys/contrib/openzfs/module/zfs/dnode_sync.c
index dd37e3af7ed5..12ab4bea145f 100644
--- a/sys/contrib/openzfs/module/zfs/dnode_sync.c
+++ b/sys/contrib/openzfs/module/zfs/dnode_sync.c
@@ -1,859 +1,866 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2020 Oxide Computer Company
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int txgoff = tx->tx_txg & TXG_MASK;
int nblkptr = dn->dn_phys->dn_nblkptr;
int old_toplvl = dn->dn_phys->dn_nlevels - 1;
int new_level = dn->dn_next_nlevels[txgoff];
int i;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* this dnode can't be paged out because it's dirty */
ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);
db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
ASSERT(db != NULL);
dn->dn_phys->dn_nlevels = new_level;
dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
(u_longlong_t)dn->dn_object, dn->dn_phys->dn_nlevels);
/*
* Lock ordering requires that we hold the children's db_mutexes (by
* calling dbuf_find()) before holding the parent's db_rwlock. The lock
* order is imposed by dbuf_read's steps of "grab the lock to protect
* db_parent, get db_parent, hold db_parent's db_rwlock".
*/
dmu_buf_impl_t *children[DN_MAX_NBLKPTR];
ASSERT3U(nblkptr, <=, DN_MAX_NBLKPTR);
for (i = 0; i < nblkptr; i++) {
children[i] =
dbuf_find(dn->dn_objset, dn->dn_object, old_toplvl, i);
}
/* transfer dnode's block pointers to new indirect block */
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
if (dn->dn_dbuf != NULL)
rw_enter(&dn->dn_dbuf->db_rwlock, RW_WRITER);
rw_enter(&db->db_rwlock, RW_WRITER);
ASSERT(db->db.db_data);
ASSERT(arc_released(db->db_buf));
ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
sizeof (blkptr_t) * nblkptr);
arc_buf_freeze(db->db_buf);
/* set dbuf's parent pointers to new indirect buf */
for (i = 0; i < nblkptr; i++) {
dmu_buf_impl_t *child = children[i];
if (child == NULL)
continue;
#ifdef ZFS_DEBUG
DB_DNODE_ENTER(child);
ASSERT3P(DB_DNODE(child), ==, dn);
DB_DNODE_EXIT(child);
#endif /* DEBUG */
if (child->db_parent && child->db_parent != dn->dn_dbuf) {
ASSERT(child->db_parent->db_level == db->db_level);
ASSERT(child->db_blkptr !=
&dn->dn_phys->dn_blkptr[child->db_blkid]);
mutex_exit(&child->db_mtx);
continue;
}
ASSERT(child->db_parent == NULL ||
child->db_parent == dn->dn_dbuf);
child->db_parent = db;
dbuf_add_ref(db, child);
if (db->db.db_data)
child->db_blkptr = (blkptr_t *)db->db.db_data + i;
else
child->db_blkptr = NULL;
dprintf_dbuf_bp(child, child->db_blkptr,
"changed db_blkptr to new indirect %s", "");
mutex_exit(&child->db_mtx);
}
bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);
rw_exit(&db->db_rwlock);
if (dn->dn_dbuf != NULL)
rw_exit(&dn->dn_dbuf->db_rwlock);
dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
}
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
uint64_t bytesfreed = 0;
dprintf("ds=%p obj=%llx num=%d\n", ds, (u_longlong_t)dn->dn_object,
num);
for (int i = 0; i < num; i++, bp++) {
if (BP_IS_HOLE(bp))
continue;
bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
/*
* Save some useful information on the holes being
* punched, including logical size, type, and indirection
* level. Retaining birth time enables detection of when
* holes are punched for reducing the number of free
* records transmitted during a zfs send.
*/
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t type = BP_GET_TYPE(bp);
uint64_t lvl = BP_GET_LEVEL(bp);
bzero(bp, sizeof (blkptr_t));
if (spa_feature_is_active(dn->dn_objset->os_spa,
SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, type);
BP_SET_LEVEL(bp, lvl);
BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
}
}
dnode_diduse_space(dn, -bytesfreed);
}
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
int off, num;
int i, err, epbs;
uint64_t txg = tx->tx_txg;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
off = start - (db->db_blkid * 1<<epbs);
num = end - start + 1;
ASSERT3U(off, >=, 0);
ASSERT3U(num, >=, 0);
ASSERT3U(db->db_level, >, 0);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
ASSERT(db->db_blkptr != NULL);
for (i = off; i < off+num; i++) {
uint64_t *buf;
dmu_buf_impl_t *child;
dbuf_dirty_record_t *dr;
int j;
ASSERT(db->db_level == 1);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
err = dbuf_hold_impl(dn, db->db_level - 1,
(db->db_blkid << epbs) + i, TRUE, FALSE, FTAG, &child);
rw_exit(&dn->dn_struct_rwlock);
if (err == ENOENT)
continue;
ASSERT(err == 0);
ASSERT(child->db_level == 0);
dr = dbuf_find_dirty_eq(child, txg);
/* data_old better be zeroed */
if (dr) {
buf = dr->dt.dl.dr_data->b_data;
for (j = 0; j < child->db.db_size >> 3; j++) {
if (buf[j] != 0) {
panic("freed data not zero: "
"child=%p i=%d off=%d num=%d\n",
(void *)child, i, off, num);
}
}
}
/*
* db_data better be zeroed unless it's dirty in a
* future txg.
*/
mutex_enter(&child->db_mtx);
buf = child->db.db_data;
if (buf != NULL && child->db_state != DB_FILL &&
list_is_empty(&child->db_dirty_records)) {
for (j = 0; j < child->db.db_size >> 3; j++) {
if (buf[j] != 0) {
panic("freed data not zero: "
"child=%p i=%d off=%d num=%d\n",
(void *)child, i, off, num);
}
}
}
mutex_exit(&child->db_mtx);
dbuf_rele(child, FTAG);
}
DB_DNODE_EXIT(db);
}
#endif
/*
* We don't usually free the indirect blocks here. If in one txg we have a
* free_range and a write to the same indirect block, it's important that we
* preserve the hole's birth times. Therefore, we don't free any indirect
* blocks in free_children(). If an indirect block happens to turn into all
* holes, it will be freed by dbuf_write_children_ready, which happens at a
* point in the syncing process where we know for certain the contents of the
* indirect block.
*
* However, if we're freeing a dnode, its space accounting must go to zero
* before we actually try to free the dnode, or we will trip an assertion. In
* addition, we know the case described above cannot occur, because the dnode is
* being freed. Therefore, we free the indirect blocks immediately in that
* case.
*/
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
boolean_t free_indirects, dmu_tx_t *tx)
{
dnode_t *dn;
blkptr_t *bp;
dmu_buf_impl_t *subdb;
uint64_t start, end, dbstart, dbend;
unsigned int epbs, shift, i;
/*
* There is a small possibility that this block will not be cached:
* 1 - if level > 1 and there are no children with level <= 1
* 2 - if this block was evicted since we read it from
* dmu_tx_hold_free().
*/
if (db->db_state != DB_CACHED)
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
/*
* If we modify this indirect block, and we are not freeing the
* dnode (!free_indirects), then this indirect block needs to get
* written to disk by dbuf_write(). If it is dirty, we know it will
* be written (otherwise, we would have incorrect on-disk state
* because the space would be freed but still referenced by the BP
* in this indirect block). Therefore we VERIFY that it is
* dirty.
*
* Our VERIFY covers some cases that do not actually have to be
* dirty, but the open-context code happens to dirty. E.g. if the
* blocks we are freeing are all holes, because in that case, we
* are only freeing part of this indirect block, so it is an
* ancestor of the first or last block to be freed. The first and
* last L1 indirect blocks are always dirtied by dnode_free_range().
*/
db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
VERIFY(BP_GET_FILL(db->db_blkptr) == 0 || db->db_dirtycnt > 0);
dmu_buf_unlock_parent(db, dblt, FTAG);
dbuf_release_bp(db);
bp = db->db.db_data;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
shift = (db->db_level - 1) * epbs;
dbstart = db->db_blkid << epbs;
start = blkid >> shift;
if (dbstart < start) {
bp += start - dbstart;
} else {
start = dbstart;
}
dbend = ((db->db_blkid + 1) << epbs) - 1;
end = (blkid + nblks - 1) >> shift;
if (dbend <= end)
end = dbend;
ASSERT3U(start, <=, end);
if (db->db_level == 1) {
FREE_VERIFY(db, start, end, tx);
rw_enter(&db->db_rwlock, RW_WRITER);
free_blocks(dn, bp, end - start + 1, tx);
rw_exit(&db->db_rwlock);
} else {
for (uint64_t id = start; id <= end; id++, bp++) {
if (BP_IS_HOLE(bp))
continue;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
id, TRUE, FALSE, FTAG, &subdb));
rw_exit(&dn->dn_struct_rwlock);
ASSERT3P(bp, ==, subdb->db_blkptr);
free_children(subdb, blkid, nblks, free_indirects, tx);
dbuf_rele(subdb, FTAG);
}
}
if (free_indirects) {
rw_enter(&db->db_rwlock, RW_WRITER);
for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++)
ASSERT(BP_IS_HOLE(bp));
bzero(db->db.db_data, db->db.db_size);
free_blocks(dn, db->db_blkptr, 1, tx);
rw_exit(&db->db_rwlock);
}
DB_DNODE_EXIT(db);
arc_buf_freeze(db->db_buf);
}
/*
* Traverse the indicated range of the provided file
* and "free" all the blocks contained there.
*/
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
boolean_t free_indirects, dmu_tx_t *tx)
{
blkptr_t *bp = dn->dn_phys->dn_blkptr;
int dnlevel = dn->dn_phys->dn_nlevels;
boolean_t trunc = B_FALSE;
if (blkid > dn->dn_phys->dn_maxblkid)
return;
ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
trunc = B_TRUE;
}
/* There are no indirect blocks in the object */
if (dnlevel == 1) {
if (blkid >= dn->dn_phys->dn_nblkptr) {
/* this range was never made persistent */
return;
}
ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
free_blocks(dn, bp + blkid, nblks, tx);
} else {
int shift = (dnlevel - 1) *
(dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
int start = blkid >> shift;
int end = (blkid + nblks - 1) >> shift;
dmu_buf_impl_t *db;
ASSERT(start < dn->dn_phys->dn_nblkptr);
bp += start;
for (int i = start; i <= end; i++, bp++) {
if (BP_IS_HOLE(bp))
continue;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
TRUE, FALSE, FTAG, &db));
rw_exit(&dn->dn_struct_rwlock);
free_children(db, blkid, nblks, free_indirects, tx);
dbuf_rele(db, FTAG);
}
}
/*
* Do not truncate the maxblkid if we are performing a raw
* receive. The raw receive sets the maxblkid manually and
* must not be overridden. Usually, the last DRR_FREE record
* will be at the maxblkid, because the source system sets
* the maxblkid when truncating. However, if the last block
* was freed by overwriting with zeros and being compressed
* away to a hole, the source system will generate a DRR_FREE
* record while leaving the maxblkid after the end of that
* record. In this case we need to leave the maxblkid as
* indicated in the DRR_OBJECT record, so that it matches the
* source system, ensuring that the cryptographic hashes will
* match.
*/
if (trunc && !dn->dn_objset->os_raw_receive) {
uint64_t off __maybe_unused;
dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;
off = (dn->dn_phys->dn_maxblkid + 1) *
(dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT(off < dn->dn_phys->dn_maxblkid ||
dn->dn_phys->dn_maxblkid == 0 ||
dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
}
}
typedef struct dnode_sync_free_range_arg {
dnode_t *dsfra_dnode;
dmu_tx_t *dsfra_tx;
boolean_t dsfra_free_indirects;
} dnode_sync_free_range_arg_t;
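/*
 * Callback for range_tree_walk() in dnode_sync(). The walk holds dn_mtx,
 * so temporarily drop it around the actual free and reacquire it before
 * returning.
 */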
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
dnode_sync_free_range_arg_t *dsfra = arg;
dnode_t *dn = dsfra->dsfra_dnode;
mutex_exit(&dn->dn_mtx);
dnode_sync_free_range_impl(dn, blkid, nblks,
dsfra->dsfra_free_indirects, dsfra->dsfra_tx);
mutex_enter(&dn->dn_mtx);
}
/*
* Try to kick all the dnode's dbufs out of the cache...
*/
void
dnode_evict_dbufs(dnode_t *dn)
{
dmu_buf_impl_t *db_marker;
dmu_buf_impl_t *db, *db_next;
db_marker = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {
#ifdef ZFS_DEBUG
DB_DNODE_ENTER(db);
ASSERT3P(DB_DNODE(db), ==, dn);
DB_DNODE_EXIT(db);
#endif /* DEBUG */
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING &&
zfs_refcount_is_zero(&db->db_holds)) {
db_marker->db_level = db->db_level;
db_marker->db_blkid = db->db_blkid;
db_marker->db_state = DB_SEARCH;
avl_insert_here(&dn->dn_dbufs, db_marker, db,
AVL_BEFORE);
/*
* We need to use the "marker" dbuf rather than
* simply getting the next dbuf, because
* dbuf_destroy() may actually remove multiple dbufs.
* It can call itself recursively on the parent dbuf,
* which may also be removed from dn_dbufs. The code
* flow would look like:
*
* dbuf_destroy():
* dnode_rele_and_unlock(parent_dbuf, evicting=TRUE):
* if (!cacheable || pending_evict)
* dbuf_destroy()
*/
dbuf_destroy(db);
db_next = AVL_NEXT(&dn->dn_dbufs, db_marker);
avl_remove(&dn->dn_dbufs, db_marker);
} else {
db->db_pending_evict = TRUE;
mutex_exit(&db->db_mtx);
db_next = AVL_NEXT(&dn->dn_dbufs, db);
}
}
mutex_exit(&dn->dn_dbufs_mtx);
kmem_free(db_marker, sizeof (dmu_buf_impl_t));
dnode_evict_bonus(dn);
}
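/*
 * Evict the bonus dbuf: destroy it immediately if it has no holds,
 * otherwise mark it for eviction once the last hold is released.
 */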
void
dnode_evict_bonus(dnode_t *dn)
{
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus != NULL) {
if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
} else {
dn->dn_bonus->db_pending_evict = TRUE;
}
}
rw_exit(&dn->dn_struct_rwlock);
}
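/*
 * Discard every dirty record on the list (recursing into the children of
 * indirect dbufs), clearing each dbuf's dirty state and releasing the hold
 * that was taken when it was dirtied.
 */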
static void
dnode_undirty_dbufs(list_t *list)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
dmu_buf_impl_t *db = dr->dr_dbuf;
uint64_t txg = dr->dr_txg;
if (db->db_level != 0)
dnode_undirty_dbufs(&dr->dt.di.dr_children);
mutex_enter(&db->db_mtx);
/* XXX - use dbuf_undirty()? */
list_remove(list, dr);
ASSERT(list_head(&db->db_dirty_records) == dr);
list_remove_head(&db->db_dirty_records);
ASSERT(list_is_empty(&db->db_dirty_records));
db->db_dirtycnt -= 1;
if (db->db_level == 0) {
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_data == db->db_buf);
dbuf_unoverride(dr);
} else {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE);
}
}
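/*
 * Called from dnode_sync() when the dnode is being freed in this txg:
 * undirty and evict its dbufs, zero out the on-disk dnode, reset the
 * in-core state, and drop the dnode hold tagged with this txg.
 */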
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
int txgoff = tx->tx_txg & TXG_MASK;
ASSERT(dmu_tx_is_syncing(tx));
/*
* Our contents should have been freed in dnode_sync() by the
* free range record inserted by the caller of dnode_free().
*/
ASSERT0(DN_USED_BYTES(dn->dn_phys));
ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));
dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
dnode_evict_dbufs(dn);
/*
* XXX - It would be nice to assert this, but we may still
* have residual holds from async evictions from the arc...
*
* zfs_obj_to_path() also depends on this being
* commented out.
*
* ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
*/
/* Undirty next bits */
dn->dn_next_nlevels[txgoff] = 0;
dn->dn_next_indblkshift[txgoff] = 0;
dn->dn_next_blksz[txgoff] = 0;
dn->dn_next_maxblkid[txgoff] = 0;
/* ASSERT(blkptrs are zero); */
ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
ASSERT(dn->dn_type != DMU_OT_NONE);
ASSERT(dn->dn_free_txg > 0);
if (dn->dn_allocated_txg != dn->dn_free_txg)
dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots);
dnode_free_interior_slots(dn);
mutex_enter(&dn->dn_mtx);
dn->dn_type = DMU_OT_NONE;
dn->dn_maxblkid = 0;
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_have_spill = B_FALSE;
dn->dn_num_slots = 1;
mutex_exit(&dn->dn_mtx);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
/*
* Now that we've released our hold, the dnode may
* be evicted, so we mustn't access it.
*/
}
/*
* Write out the dnode's dirty buffers.
*/
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
dnode_phys_t *dnp = dn->dn_phys;
int txgoff = tx->tx_txg & TXG_MASK;
list_t *list = &dn->dn_dirty_records[txgoff];
static const dnode_phys_t zerodn __maybe_unused = { 0 };
boolean_t kill_spill = B_FALSE;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
ASSERT(dnp->dn_type != DMU_OT_NONE ||
bcmp(dnp, &zerodn, DNODE_MIN_SIZE) == 0);
DNODE_VERIFY(dn);
ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));
/*
* Do user accounting if it is enabled and this is not
* an encrypted receive.
*/
if (dmu_objset_userused_enabled(os) &&
!DMU_OBJECT_IS_SPECIAL(dn->dn_object) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
mutex_enter(&dn->dn_mtx);
dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
dn->dn_oldflags = dn->dn_phys->dn_flags;
dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
if (dmu_objset_userobjused_enabled(dn->dn_objset))
dn->dn_phys->dn_flags |=
DNODE_FLAG_USEROBJUSED_ACCOUNTED;
mutex_exit(&dn->dn_mtx);
dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
- } else {
- /* Once we account for it, we should always account for it */
+ } else if (!(os->os_encrypted && dmu_objset_is_receiving(os))) {
+ /*
+ * Once we account for it, we should always account for it,
+ * except for the case of a raw receive. We will not be able
+ * to account for it until the receiving dataset has been
+ * mounted.
+ */
ASSERT(!(dn->dn_phys->dn_flags &
DNODE_FLAG_USERUSED_ACCOUNTED));
ASSERT(!(dn->dn_phys->dn_flags &
DNODE_FLAG_USEROBJUSED_ACCOUNTED));
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_allocated_txg == tx->tx_txg) {
/* The dnode is newly allocated or reallocated */
if (dnp->dn_type == DMU_OT_NONE) {
/* this is a first alloc, not a realloc */
dnp->dn_nlevels = 1;
dnp->dn_nblkptr = dn->dn_nblkptr;
}
dnp->dn_type = dn->dn_type;
dnp->dn_bonustype = dn->dn_bonustype;
dnp->dn_bonuslen = dn->dn_bonuslen;
}
dnp->dn_extra_slots = dn->dn_num_slots - 1;
ASSERT(dnp->dn_nlevels > 1 ||
BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT(dnp->dn_nlevels < 2 ||
BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);
if (dn->dn_next_type[txgoff] != 0) {
dnp->dn_type = dn->dn_type;
dn->dn_next_type[txgoff] = 0;
}
if (dn->dn_next_blksz[txgoff] != 0) {
ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
SPA_MINBLOCKSIZE) == 0);
ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
dn->dn_maxblkid == 0 || list_head(list) != NULL ||
dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
dnp->dn_datablkszsec ||
!range_tree_is_empty(dn->dn_free_ranges[txgoff]));
dnp->dn_datablkszsec =
dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
dn->dn_next_blksz[txgoff] = 0;
}
if (dn->dn_next_bonuslen[txgoff] != 0) {
if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
dnp->dn_bonuslen = 0;
else
dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
ASSERT(dnp->dn_bonuslen <=
DN_SLOTS_TO_BONUSLEN(dnp->dn_extra_slots + 1));
dn->dn_next_bonuslen[txgoff] = 0;
}
if (dn->dn_next_bonustype[txgoff] != 0) {
ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
dn->dn_next_bonustype[txgoff] = 0;
}
boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
dn->dn_free_txg <= tx->tx_txg;
/*
* Remove the spill block if we have been explicitly asked to
* remove it, or if the object is being removed.
*/
if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
kill_spill = B_TRUE;
dn->dn_rm_spillblk[txgoff] = 0;
}
if (dn->dn_next_indblkshift[txgoff] != 0) {
ASSERT(dnp->dn_nlevels == 1);
dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
dn->dn_next_indblkshift[txgoff] = 0;
}
/*
* Just take the live (open-context) values for checksum and compress.
* Strictly speaking it's a future leak, but nothing bad happens if we
* start using the new checksum or compress algorithm a little early.
*/
dnp->dn_checksum = dn->dn_checksum;
dnp->dn_compress = dn->dn_compress;
mutex_exit(&dn->dn_mtx);
if (kill_spill) {
free_blocks(dn, DN_SPILL_BLKPTR(dn->dn_phys), 1, tx);
mutex_enter(&dn->dn_mtx);
dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/* process all the "freed" ranges in the file */
if (dn->dn_free_ranges[txgoff] != NULL) {
dnode_sync_free_range_arg_t dsfra;
dsfra.dsfra_dnode = dn;
dsfra.dsfra_tx = tx;
dsfra.dsfra_free_indirects = freeing_dnode;
mutex_enter(&dn->dn_mtx);
if (freeing_dnode) {
ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
0, dn->dn_maxblkid + 1));
}
/*
* Because dnode_sync_free_range() must drop dn_mtx during its
* processing, using it as a callback to range_tree_vacate() is
* not safe. No other operations (besides destroy) are allowed
* once range_tree_vacate() has begun, and dropping dn_mtx
* would leave a window open for another thread to observe that
* invalid (and unsafe) state.
*/
range_tree_walk(dn->dn_free_ranges[txgoff],
dnode_sync_free_range, &dsfra);
range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
range_tree_destroy(dn->dn_free_ranges[txgoff]);
dn->dn_free_ranges[txgoff] = NULL;
mutex_exit(&dn->dn_mtx);
}
if (freeing_dnode) {
dn->dn_objset->os_freed_dnodes++;
dnode_sync_free(dn, tx);
return;
}
if (dn->dn_num_slots > DNODE_MIN_SLOTS) {
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
mutex_enter(&ds->ds_lock);
ds->ds_feature_activation[SPA_FEATURE_LARGE_DNODE] =
(void *)B_TRUE;
mutex_exit(&ds->ds_lock);
}
if (dn->dn_next_nlevels[txgoff]) {
dnode_increase_indirection(dn, tx);
dn->dn_next_nlevels[txgoff] = 0;
}
/*
* This must be done after dnode_sync_free_range()
* and dnode_increase_indirection(). See dnode_new_blkid()
* for an explanation of the high bit being set.
*/
if (dn->dn_next_maxblkid[txgoff]) {
mutex_enter(&dn->dn_mtx);
dnp->dn_maxblkid =
dn->dn_next_maxblkid[txgoff] & ~DMU_NEXT_MAXBLKID_SET;
dn->dn_next_maxblkid[txgoff] = 0;
mutex_exit(&dn->dn_mtx);
}
if (dn->dn_next_nblkptr[txgoff]) {
/* this should only happen on a realloc */
ASSERT(dn->dn_allocated_txg == tx->tx_txg);
if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
/* zero the new blkptrs we are gaining */
bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
sizeof (blkptr_t) *
(dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
} else {
int i;
ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
/* the blkptrs we are losing better be unallocated */
for (i = 0; i < dnp->dn_nblkptr; i++) {
if (i >= dn->dn_next_nblkptr[txgoff])
ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
}
#endif
}
mutex_enter(&dn->dn_mtx);
dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
dn->dn_next_nblkptr[txgoff] = 0;
mutex_exit(&dn->dn_mtx);
}
dbuf_sync_list(list, dn->dn_phys->dn_nlevels - 1, tx);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
ASSERT3P(list_head(list), ==, NULL);
dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
}
+ ASSERT3U(dnp->dn_bonuslen, <=, DN_MAX_BONUS_LEN(dnp));
+
/*
* Although we have dropped our reference to the dnode, it
* can't be evicted until it's written, and we haven't yet
* initiated the IO for the dnode's dbuf. Additionally, the caller
* has already added a reference to the dnode because it's on the
* os_synced_dnodes list.
*/
}
diff --git a/sys/contrib/openzfs/module/zfs/dsl_bookmark.c b/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
index bead7da2237f..b8e3523ffc2d 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_bookmark.c
@@ -1,1734 +1,1734 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/spa.h>
#include <sys/dsl_bookmark.h>
#include <zfs_namecheck.h>
#include <sys/dmu_send.h>
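/*
 * Hold the dataset named by the portion of "fullname" before the '#' and
 * return a pointer (into fullname) to the bookmark's short name.
 */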
static int
dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
dsl_dataset_t **dsp, void *tag, char **shortnamep)
{
char buf[ZFS_MAX_DATASET_NAME_LEN];
char *hashp;
if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
hashp = strchr(fullname, '#');
if (hashp == NULL)
return (SET_ERROR(EINVAL));
*shortnamep = hashp + 1;
if (zfs_component_namecheck(*shortnamep, NULL, NULL))
return (SET_ERROR(EINVAL));
(void) strlcpy(buf, fullname, hashp - fullname + 1);
return (dsl_dataset_hold(dp, buf, tag, dsp));
}
/*
* When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
* to be zeroed.
*
* Returns ESRCH if bookmark is not found.
* Note, we need to use the ZAP rather than the AVL to look up bookmarks
* by name, because only the ZAP honors the casesensitivity setting.
*/
int
dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
zfs_bookmark_phys_t *bmark_phys)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
matchtype_t mt = 0;
int err;
if (bmark_zapobj == 0)
return (SET_ERROR(ESRCH));
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
/*
* Zero out the bookmark in case the one stored on disk
* is in an older, shorter format.
*/
bzero(bmark_phys, sizeof (*bmark_phys));
err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
NULL);
return (err == ENOENT ? SET_ERROR(ESRCH) : err);
}
/*
* If later_ds is non-NULL, this will return EXDEV if the specified bookmark
* does not represent an earlier point in later_ds's timeline. However,
* bmp will still be filled in if we return EXDEV.
*
* Returns ENOENT if the dataset containing the bookmark does not exist.
* Returns ESRCH if the dataset exists but the bookmark was not found in it.
*/
int
dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
{
char *shortname;
dsl_dataset_t *ds;
int error;
error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
if (error != 0)
return (error);
error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
if (error == 0 && later_ds != NULL) {
if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
error = SET_ERROR(EXDEV);
}
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* Validates that
* - bmark is a full dataset path of a bookmark (bookmark_namecheck)
* - source is a full path of a snapshot or bookmark
* ({bookmark,snapshot}_namecheck)
*
* Returns 0 if valid, -1 otherwise.
*/
static int
dsl_bookmark_create_nvl_validate_pair(const char *bmark, const char *source)
{
if (bookmark_namecheck(bmark, NULL, NULL) != 0)
return (-1);
int is_bmark, is_snap;
is_bmark = bookmark_namecheck(source, NULL, NULL) == 0;
is_snap = snapshot_namecheck(source, NULL, NULL) == 0;
if (!is_bmark && !is_snap)
return (-1);
return (0);
}
/*
* Check that the given nvlist corresponds to the following schema:
* { newbookmark -> source, ... }
* where
* - each pair passes dsl_bookmark_create_nvl_validate_pair
* - all newbookmarks are in the same pool
* - all newbookmarks have unique names
*
* Note that this function only validates the above schema. Callers must ensure
* that the bookmarks can be created, e.g. that sources exist.
*
* Returns 0 if the nvlist adheres to above schema.
* Returns -1 if it doesn't.
*/
int
dsl_bookmark_create_nvl_validate(nvlist_t *bmarks)
{
char *first;
size_t first_len;
first = NULL;
for (nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(bmarks, pair)) {
char *bmark = nvpair_name(pair);
char *source;
/* list structure: values must be snapshots XOR bookmarks */
if (nvpair_value_string(pair, &source) != 0)
return (-1);
if (dsl_bookmark_create_nvl_validate_pair(bmark, source) != 0)
return (-1);
/* same pool check */
if (first == NULL) {
char *cp = strpbrk(bmark, "/#");
if (cp == NULL)
return (-1);
first = bmark;
first_len = cp - bmark;
}
if (strncmp(first, bmark, first_len) != 0)
return (-1);
switch (*(bmark + first_len)) {
case '/': /* fallthrough */
case '#':
break;
default:
return (-1);
}
/* unique newbookmark names; todo: O(n^2) */
for (nvpair_t *pair2 = nvlist_next_nvpair(bmarks, pair);
pair2 != NULL; pair2 = nvlist_next_nvpair(bmarks, pair2)) {
if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
return (-1);
}
}
return (0);
}
/*
* expects that newbm and source have been validated using
* dsl_bookmark_create_nvl_validate_pair
*/
static int
dsl_bookmark_create_check_impl(dsl_pool_t *dp,
const char *newbm, const char *source)
{
ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm, source));
/* defer source namecheck until we know it's a snapshot or bookmark */
int error;
dsl_dataset_t *newbm_ds;
char *newbm_short;
zfs_bookmark_phys_t bmark_phys;
error = dsl_bookmark_hold_ds(dp, newbm, &newbm_ds, FTAG, &newbm_short);
if (error != 0)
return (error);
/* Verify that the new bookmark does not already exist */
error = dsl_bookmark_lookup_impl(newbm_ds, newbm_short, &bmark_phys);
switch (error) {
case ESRCH:
/* happy path: new bmark doesn't exist, proceed after switch */
error = 0;
break;
case 0:
error = SET_ERROR(EEXIST);
goto eholdnewbmds;
default:
/* dsl_bookmark_lookup_impl already did SET_ERROR */
goto eholdnewbmds;
}
/* error is retval of the following if-cascade */
if (strchr(source, '@') != NULL) {
dsl_dataset_t *source_snap_ds;
ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
if (error == 0) {
VERIFY(source_snap_ds->ds_is_snapshot);
/*
* Verify that source snapshot is an earlier point in
* newbm_ds's timeline (source may be newbm_ds's origin)
*/
if (!dsl_dataset_is_before(newbm_ds, source_snap_ds, 0))
error = SET_ERROR(
ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
dsl_dataset_rele(source_snap_ds, FTAG);
}
} else if (strchr(source, '#') != NULL) {
zfs_bookmark_phys_t source_phys;
ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
/*
* Source must exist and be an earlier point in newbm_ds's
* timeline (newbm_ds's origin may be a snap of source's ds)
*/
error = dsl_bookmark_lookup(dp, source, newbm_ds, &source_phys);
switch (error) {
case 0:
break; /* happy path */
case EXDEV:
error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
break;
default:
/* dsl_bookmark_lookup already did SET_ERROR */
break;
}
} else {
/*
* dsl_bookmark_create_nvl_validate validates that source is
* either snapshot or bookmark
*/
panic("unreachable code: %s", source);
}
eholdnewbmds:
dsl_dataset_rele(newbm_ds, FTAG);
return (error);
}
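/*
 * Check function for the bookmark-create sync task: validate the nvlist
 * schema, then check each (new bookmark, source) pair, recording
 * per-bookmark errors in dbca_errors when it is provided.
 */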
int
dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
{
dsl_bookmark_create_arg_t *dbca = arg;
int rv = 0;
int schema_err = 0;
ASSERT3P(dbca, !=, NULL);
ASSERT3P(dbca->dbca_bmarks, !=, NULL);
/* dbca->dbca_errors is allowed to be NULL */
dsl_pool_t *dp = dmu_tx_pool(tx);
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
return (SET_ERROR(ENOTSUP));
if (dsl_bookmark_create_nvl_validate(dbca->dbca_bmarks) != 0)
rv = schema_err = SET_ERROR(EINVAL);
for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
char *new = nvpair_name(pair);
int error = schema_err;
if (error == 0) {
char *source = fnvpair_value_string(pair);
error = dsl_bookmark_create_check_impl(dp, new, source);
if (error != 0)
error = SET_ERROR(error);
}
if (error != 0) {
rv = error;
if (dbca->dbca_errors != NULL)
fnvlist_add_int32(dbca->dbca_errors,
new, error);
}
}
return (rv);
}
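/*
 * Allocate an in-core bookmark node for the ds_bookmarks AVL tree; the
 * caller is responsible for filling in dbn_phys.
 */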
static dsl_bookmark_node_t *
dsl_bookmark_node_alloc(char *shortname)
{
dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
dbn->dbn_name = spa_strdup(shortname);
dbn->dbn_dirty = B_FALSE;
mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
return (dbn);
}
/*
* Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
*/
static void
dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
{
spa_t *spa = dsl_dataset_get_spa(snap);
objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
zbm->zbm_guid = dsp->ds_guid;
zbm->zbm_creation_txg = dsp->ds_creation_txg;
zbm->zbm_creation_time = dsp->ds_creation_time;
zbm->zbm_redaction_obj = 0;
/*
* If the dataset is encrypted, create a larger bookmark to
* accommodate the IVset guid. The IVset guid was added
* after the encryption feature to prevent a problem with
* raw sends. If we encounter an encrypted dataset without
* an IVset guid we fall back to a normal bookmark.
*/
if (snap->ds_dir->dd_crypto_obj != 0 &&
spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
(void) zap_lookup(mos, snap->ds_object,
DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
&zbm->zbm_ivset_guid);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
dsl_dataset_t *nextds;
VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
dsp->ds_next_snap_obj, FTAG, &nextds));
dsl_deadlist_space(&nextds->ds_deadlist,
&zbm->zbm_referenced_freed_before_next_snap,
&zbm->zbm_compressed_freed_before_next_snap,
&zbm->zbm_uncompressed_freed_before_next_snap);
dsl_dataset_rele(nextds, FTAG);
} else {
bzero(&zbm->zbm_flags,
sizeof (zfs_bookmark_phys_t) -
offsetof(zfs_bookmark_phys_t, zbm_flags));
}
}
/*
* Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
* SPA feature counters.
*/
void
dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
if (hds->ds_bookmarks_obj == 0) {
hds->ds_bookmarks_obj = zap_create_norm(mos,
U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
tx);
spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
dsl_dataset_zapify(hds, tx);
VERIFY0(zap_add(mos, hds->ds_object,
DS_FIELD_BOOKMARK_NAMES,
sizeof (hds->ds_bookmarks_obj), 1,
&hds->ds_bookmarks_obj, tx));
}
avl_add(&hds->ds_bookmarks, dbn);
/*
* To maintain backwards compatibility with software that doesn't
* understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
* possible bookmark size.
*/
uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
(dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
}
__attribute__((unused)) zfs_bookmark_phys_t zero_phys = { 0 };
ASSERT0(bcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
&zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
&dbn->dbn_phys, tx));
}
/*
* If redaction_list is non-null, we create a redacted bookmark and redaction
* list, and store the object number of the redaction list in redact_obj.
*/
static void
dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps, void *tag,
redaction_list_t **redaction_list)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dsl_dataset_t *snapds, *bmark_fs;
char *shortname;
boolean_t bookmark_redacted;
uint64_t *dsredactsnaps;
uint64_t dsnumsnaps;
VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
&shortname));
dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
if (redaction_list != NULL || bookmark_redacted) {
redaction_list_t *local_rl;
if (bookmark_redacted) {
redact_snaps = dsredactsnaps;
num_redact_snaps = dsnumsnaps;
}
dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
DMU_OTN_UINT64_METADATA, sizeof (redaction_list_phys_t) +
num_redact_snaps * sizeof (uint64_t), tx);
spa_feature_incr(dp->dp_spa,
SPA_FEATURE_REDACTION_BOOKMARKS, tx);
VERIFY0(dsl_redaction_list_hold_obj(dp,
dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
dsl_redaction_list_long_hold(dp, local_rl, tag);
ASSERT3U((local_rl)->rl_dbuf->db_size, >=,
sizeof (redaction_list_phys_t) + num_redact_snaps *
sizeof (uint64_t));
dmu_buf_will_dirty(local_rl->rl_dbuf, tx);
bcopy(redact_snaps, local_rl->rl_phys->rlp_snaps,
sizeof (uint64_t) * num_redact_snaps);
local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
if (bookmark_redacted) {
ASSERT3P(redaction_list, ==, NULL);
local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
local_rl->rl_phys->rlp_last_object = UINT64_MAX;
dsl_redaction_list_long_rele(local_rl, tag);
dsl_redaction_list_rele(local_rl, tag);
} else {
*redaction_list = local_rl;
}
}
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
spa_feature_incr(dp->dp_spa,
SPA_FEATURE_BOOKMARK_WRITTEN, tx);
}
dsl_bookmark_node_add(bmark_fs, dbn, tx);
spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
"name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
(longlong_t)snapds->ds_object,
(longlong_t)dbn->dbn_phys.zbm_redaction_obj);
dsl_dataset_rele(bmark_fs, FTAG);
dsl_dataset_rele(snapds, FTAG);
}
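/*
 * Create bookmark "new_name" as a copy of the existing bookmark
 * "source_name" (see the caveat below about redaction bookmarks).
 */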
static void
dsl_bookmark_create_sync_impl_book(
const char *new_name, const char *source_name, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *bmark_fs_source, *bmark_fs_new;
char *source_shortname, *new_shortname;
zfs_bookmark_phys_t source_phys;
VERIFY0(dsl_bookmark_hold_ds(dp, source_name, &bmark_fs_source, FTAG,
&source_shortname));
VERIFY0(dsl_bookmark_hold_ds(dp, new_name, &bmark_fs_new, FTAG,
&new_shortname));
/*
* create a copy of the source bookmark by copying most of its members
*
* Caveat: bookmarking a redaction bookmark yields a normal bookmark
* -----------------------------------------------------------------
* Reasoning:
* - The zbm_redaction_obj would be referred to by both source and new
* bookmark, but would be destroyed once either source or new is
* destroyed, resulting in use-after-free of the referred object.
* - User expectation when issuing the `zfs bookmark` command is that
* a normal bookmark of the source is created
*
* Design Alternatives For Full Redaction Bookmark Copying:
* - reference-count the redaction object => would require on-disk
* format change for existing redaction objects
* - Copy the redaction object => cannot be done in syncing context
* because the redaction object might be too large
*/
VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source, source_shortname,
&source_phys));
dsl_bookmark_node_t *new_dbn = dsl_bookmark_node_alloc(new_shortname);
memcpy(&new_dbn->dbn_phys, &source_phys, sizeof (source_phys));
new_dbn->dbn_phys.zbm_redaction_obj = 0;
/* update feature counters */
if (new_dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
spa_feature_incr(dp->dp_spa,
SPA_FEATURE_BOOKMARK_WRITTEN, tx);
}
/* no need for redaction bookmark counter; nulled zbm_redaction_obj */
/* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
/*
* write new bookmark
*
* Note that dsl_bookmark_lookup_impl guarantees that, if source is a
* v1 bookmark, the v2-only fields are zeroed.
* And dsl_bookmark_node_add writes back a v1-sized bookmark if
* v2 bookmarks are disabled and/or v2-only fields are zeroed.
* => bookmark copying works on pre-bookmark-v2 pools
*/
dsl_bookmark_node_add(bmark_fs_new, new_dbn, tx);
spa_history_log_internal_ds(bmark_fs_source, "bookmark", tx,
"name=%s creation_txg=%llu source_guid=%llu",
new_shortname, (longlong_t)new_dbn->dbn_phys.zbm_creation_txg,
(longlong_t)source_phys.zbm_guid);
dsl_dataset_rele(bmark_fs_source, FTAG);
dsl_dataset_rele(bmark_fs_new, FTAG);
}
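/*
 * Sync function for the bookmark-create sync task: dispatch each
 * (new bookmark, source) pair to the snapshot- or bookmark-sourced
 * implementation depending on whether the source contains '@' or '#'.
 */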
void
dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
{
dsl_bookmark_create_arg_t *dbca = arg;
ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
SPA_FEATURE_BOOKMARKS));
for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
char *new = nvpair_name(pair);
char *source = fnvpair_value_string(pair);
if (strchr(source, '@') != NULL) {
dsl_bookmark_create_sync_impl_snap(new, source, tx,
0, NULL, NULL, NULL);
} else if (strchr(source, '#') != NULL) {
dsl_bookmark_create_sync_impl_book(new, source, tx);
} else {
panic("unreachable code");
}
}
}
/*
* The bookmarks must all be in the same pool.
*/
int
dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
{
nvpair_t *pair;
dsl_bookmark_create_arg_t dbca;
pair = nvlist_next_nvpair(bmarks, NULL);
if (pair == NULL)
return (0);
dbca.dbca_bmarks = bmarks;
dbca.dbca_errors = errors;
return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
dsl_bookmark_create_sync, &dbca,
fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
}
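/*
 * Check function for creating a redacted bookmark: the redaction
 * bookmarks feature must be enabled, the list of redaction snapshots must
 * fit in the bonus buffer, and the (bookmark, snapshot) pair must pass
 * the same checks as an ordinary bookmark.
 */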
static int
dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
{
dsl_bookmark_create_redacted_arg_t *dbcra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
int rv = 0;
if (!spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_REDACTION_BOOKMARKS))
return (SET_ERROR(ENOTSUP));
/*
* If the list of redact snaps will not fit in the bonus buffer with
* the furthest reached object and offset, fail.
*/
if (dbcra->dbcra_numsnaps > (dmu_bonus_max() -
sizeof (redaction_list_phys_t)) / sizeof (uint64_t))
return (SET_ERROR(E2BIG));
if (dsl_bookmark_create_nvl_validate_pair(
dbcra->dbcra_bmark, dbcra->dbcra_snap) != 0)
return (SET_ERROR(EINVAL));
rv = dsl_bookmark_create_check_impl(dp,
dbcra->dbcra_bmark, dbcra->dbcra_snap);
return (rv);
}
static void
dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
{
dsl_bookmark_create_redacted_arg_t *dbcra = arg;
dsl_bookmark_create_sync_impl_snap(dbcra->dbcra_bmark,
dbcra->dbcra_snap, tx, dbcra->dbcra_numsnaps, dbcra->dbcra_snaps,
dbcra->dbcra_tag, dbcra->dbcra_rl);
}
int
dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
uint64_t numsnaps, uint64_t *snapguids, void *tag, redaction_list_t **rl)
{
dsl_bookmark_create_redacted_arg_t dbcra;
dbcra.dbcra_bmark = bookmark;
dbcra.dbcra_snap = snapshot;
dbcra.dbcra_rl = rl;
dbcra.dbcra_numsnaps = numsnaps;
dbcra.dbcra_snaps = snapguids;
dbcra.dbcra_tag = tag;
return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
dsl_bookmark_create_redacted_sync, &dbcra, 5,
ZFS_SPACE_CHECK_NORMAL));
}
/*
* Retrieve the list of properties given in the 'props' nvlist for a bookmark.
* If 'props' is NULL, retrieves all properties.
*/
static void
dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
nvlist_t *props, nvlist_t *out_props)
{
ASSERT3P(dp, !=, NULL);
ASSERT3P(bmark_phys, !=, NULL);
ASSERT3P(out_props, !=, NULL);
ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_GUID))) {
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_GUID, bmark_phys->zbm_guid);
}
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
}
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_CREATION))) {
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
}
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
}
if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_REFERENCED,
bmark_phys->zbm_referenced_bytes_refd);
}
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_LOGICALREFERENCED,
bmark_phys->zbm_uncompressed_bytes_refd);
}
if (props == NULL || nvlist_exists(props,
zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
uint64_t ratio =
bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
bmark_phys->zbm_uncompressed_bytes_refd * 100 /
bmark_phys->zbm_compressed_bytes_refd;
dsl_prop_nvlist_add_uint64(out_props,
ZFS_PROP_REFRATIO, ratio);
}
}
if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
nvlist_exists(props, "redact_complete")) &&
bmark_phys->zbm_redaction_obj != 0) {
redaction_list_t *rl;
int err = dsl_redaction_list_hold_obj(dp,
bmark_phys->zbm_redaction_obj, FTAG, &rl);
if (err == 0) {
if (nvlist_exists(props, "redact_snaps")) {
nvlist_t *nvl;
nvl = fnvlist_alloc();
fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
rl->rl_phys->rlp_snaps,
rl->rl_phys->rlp_num_snaps);
fnvlist_add_nvlist(out_props, "redact_snaps",
nvl);
nvlist_free(nvl);
}
if (nvlist_exists(props, "redact_complete")) {
nvlist_t *nvl;
nvl = fnvlist_alloc();
fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
rl->rl_phys->rlp_last_object == UINT64_MAX);
fnvlist_add_nvlist(out_props, "redact_complete",
nvl);
nvlist_free(nvl);
}
dsl_redaction_list_rele(rl, FTAG);
}
}
}
int
dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_is_snapshot(ds))
return (SET_ERROR(EINVAL));
for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
nvlist_t *out_props = fnvlist_alloc();
dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
fnvlist_free(out_props);
}
return (0);
}
/*
* Comparison func for ds_bookmarks AVL tree. We sort the bookmarks by
* their TXG, then by their FBN-ness. The "FBN-ness" component ensures
* that all bookmarks at the same TXG that HAS_FBN are adjacent, which
* dsl_bookmark_destroy_sync_impl() depends on. Note that there may be
* multiple bookmarks at the same TXG (with the same FBN-ness). In this
* case we differentiate them by an arbitrary metric (in this case,
* their names).
*/
static int
dsl_bookmark_compare(const void *l, const void *r)
{
const dsl_bookmark_node_t *ldbn = l;
const dsl_bookmark_node_t *rdbn = r;
int64_t cmp = TREE_CMP(ldbn->dbn_phys.zbm_creation_txg,
rdbn->dbn_phys.zbm_creation_txg);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
(rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
if (likely(cmp))
return (cmp);
cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
return (TREE_ISIGN(cmp));
}
/*
* Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
*/
int
dsl_bookmark_init_ds(dsl_dataset_t *ds)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
ASSERT(!ds->ds_is_snapshot);
avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
sizeof (dsl_bookmark_node_t),
offsetof(dsl_bookmark_node_t, dbn_node));
if (!dsl_dataset_is_zapified(ds))
return (0);
int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
if (zaperr == ENOENT)
return (0);
if (zaperr != 0)
return (zaperr);
if (ds->ds_bookmarks_obj == 0)
return (0);
int err = 0;
zap_cursor_t zc;
zap_attribute_t attr;
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
(err = zap_cursor_retrieve(&zc, &attr)) == 0;
zap_cursor_advance(&zc)) {
dsl_bookmark_node_t *dbn =
dsl_bookmark_node_alloc(attr.za_name);
err = dsl_bookmark_lookup_impl(ds,
dbn->dbn_name, &dbn->dbn_phys);
ASSERT3U(err, !=, ENOENT);
if (err != 0) {
kmem_free(dbn, sizeof (*dbn));
break;
}
avl_add(&ds->ds_bookmarks, dbn);
}
zap_cursor_fini(&zc);
if (err == ENOENT)
err = 0;
return (err);
}
void
dsl_bookmark_fini_ds(dsl_dataset_t *ds)
{
void *cookie = NULL;
dsl_bookmark_node_t *dbn;
if (ds->ds_is_snapshot)
return;
while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
spa_strfree(dbn->dbn_name);
mutex_destroy(&dbn->dbn_lock);
kmem_free(dbn, sizeof (*dbn));
}
avl_destroy(&ds->ds_bookmarks);
}
/*
* Retrieve the bookmarks that exist in the specified dataset, and the
* requested properties of each bookmark.
*
* The "props" nvlist specifies which properties are requested.
* See lzc_get_bookmarks() for the list of valid properties.
*/
int
dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
err = dsl_pool_hold(dsname, FTAG, &dp);
if (err != 0)
return (err);
err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (err != 0) {
dsl_pool_rele(dp, FTAG);
return (err);
}
err = dsl_get_bookmarks_impl(ds, props, outnvl);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (err);
}
/*
* Retrieve all properties for a single bookmark in the given dataset.
*/
int
dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
zfs_bookmark_phys_t bmark_phys = { 0 };
int err;
err = dsl_pool_hold(dsname, FTAG, &dp);
if (err != 0)
return (err);
err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (err != 0) {
dsl_pool_rele(dp, FTAG);
return (err);
}
err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
if (err != 0)
goto out;
dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
out:
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (err);
}
typedef struct dsl_bookmark_destroy_arg {
nvlist_t *dbda_bmarks;
nvlist_t *dbda_success;
nvlist_t *dbda_errors;
} dsl_bookmark_destroy_arg_t;
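/*
 * Remove a single bookmark from the dataset: decrement the relevant
 * feature counters, drop deadlist and clone keys that are no longer
 * needed, free any redaction object, and remove both the in-core AVL
 * node and the on-disk ZAP entry.
 */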
static void
dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
matchtype_t mt = 0;
uint64_t int_size, num_ints;
/*
* 'search' must be zeroed so that dbn_flags (which is used in
* dsl_bookmark_compare()) will be zeroed even if the on-disk
* (in ZAP) bookmark is shorter than offsetof(dbn_flags).
*/
dsl_bookmark_node_t search = { 0 };
char realname[ZFS_MAX_DATASET_NAME_LEN];
/*
* Find the real name of this bookmark, which may be different
* from the given name if the dataset is case-insensitive. Then
* use the real name to find the node in the ds_bookmarks AVL tree.
*/
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
ASSERT3U(int_size, ==, sizeof (uint64_t));
if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
spa_feature_decr(dmu_objset_spa(mos),
SPA_FEATURE_BOOKMARK_V2, tx);
}
VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
search.dbn_name = realname;
dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
ASSERT(dbn != NULL);
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
/*
* If this bookmark HAS_FBN, and it is before the most
* recent snapshot, then its TXG is a key in the head's
* deadlist (and all clones' heads' deadlists). If this is
* the last thing keeping the key (i.e. there are no more
* bookmarks with HAS_FBN at this TXG, and there is no
* snapshot at this TXG), then remove the key.
*
* Note that this algorithm depends on ds_bookmarks being
* sorted such that all bookmarks at the same TXG with
* HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
* at the same TXG in between them). If this were not
* the case, we would need to examine *all* bookmarks
* at this TXG, rather than just the adjacent ones.
*/
dsl_bookmark_node_t *dbn_prev =
AVL_PREV(&ds->ds_bookmarks, dbn);
dsl_bookmark_node_t *dbn_next =
AVL_NEXT(&ds->ds_bookmarks, dbn);
boolean_t more_bookmarks_at_this_txg =
(dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
dbn->dbn_phys.zbm_creation_txg &&
(dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
(dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
dbn->dbn_phys.zbm_creation_txg &&
(dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
!more_bookmarks_at_this_txg &&
dbn->dbn_phys.zbm_creation_txg <
dsl_dataset_phys(ds)->ds_prev_snap_txg) {
dsl_dir_remove_clones_key(ds->ds_dir,
dbn->dbn_phys.zbm_creation_txg, tx);
dsl_deadlist_remove_key(&ds->ds_deadlist,
dbn->dbn_phys.zbm_creation_txg, tx);
}
spa_feature_decr(dmu_objset_spa(mos),
SPA_FEATURE_BOOKMARK_WRITTEN, tx);
}
if (dbn->dbn_phys.zbm_redaction_obj != 0) {
VERIFY0(dmu_object_free(mos,
dbn->dbn_phys.zbm_redaction_obj, tx));
spa_feature_decr(dmu_objset_spa(mos),
SPA_FEATURE_REDACTION_BOOKMARKS, tx);
}
avl_remove(&ds->ds_bookmarks, dbn);
spa_strfree(dbn->dbn_name);
mutex_destroy(&dbn->dbn_lock);
kmem_free(dbn, sizeof (*dbn));
VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
}
static int
dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
{
dsl_bookmark_destroy_arg_t *dbda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
int rv = 0;
ASSERT(nvlist_empty(dbda->dbda_success));
ASSERT(nvlist_empty(dbda->dbda_errors));
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
return (0);
for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
const char *fullname = nvpair_name(pair);
dsl_dataset_t *ds;
zfs_bookmark_phys_t bm;
int error;
char *shortname;
error = dsl_bookmark_hold_ds(dp, fullname, &ds,
FTAG, &shortname);
if (error == ENOENT) {
/* ignore it; the bookmark is "already destroyed" */
continue;
}
if (error == 0) {
error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
dsl_dataset_rele(ds, FTAG);
if (error == ESRCH) {
/*
* ignore it; the bookmark is
* "already destroyed"
*/
continue;
}
if (error == 0 && bm.zbm_redaction_obj != 0) {
redaction_list_t *rl = NULL;
error = dsl_redaction_list_hold_obj(tx->tx_pool,
bm.zbm_redaction_obj, FTAG, &rl);
if (error == ENOENT) {
error = 0;
} else if (error == 0 &&
dsl_redaction_list_long_held(rl)) {
error = SET_ERROR(EBUSY);
}
if (rl != NULL) {
dsl_redaction_list_rele(rl, FTAG);
}
}
}
if (error == 0) {
if (dmu_tx_is_syncing(tx)) {
fnvlist_add_boolean(dbda->dbda_success,
fullname);
}
} else {
fnvlist_add_int32(dbda->dbda_errors, fullname, error);
rv = error;
}
}
return (rv);
}
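/*
 * Sync function: destroy each bookmark that passed the check, and free
 * the per-dataset bookmark ZAP object (decrementing the feature count)
 * once a dataset's last bookmark is gone.
 */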
static void
dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
{
dsl_bookmark_destroy_arg_t *dbda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
dsl_dataset_t *ds;
char *shortname;
uint64_t zap_cnt;
VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
&ds, FTAG, &shortname));
dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
/*
* If all of this dataset's bookmarks have been destroyed,
* free the zap object and decrement the feature's use count.
*/
VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
if (zap_cnt == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
ds->ds_bookmarks_obj = 0;
spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
VERIFY0(zap_remove(mos, ds->ds_object,
DS_FIELD_BOOKMARK_NAMES, tx));
}
spa_history_log_internal_ds(ds, "remove bookmark", tx,
"name=%s", shortname);
dsl_dataset_rele(ds, FTAG);
}
}
/*
* The bookmarks must all be in the same pool.
*/
int
dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
{
int rv;
dsl_bookmark_destroy_arg_t dbda;
nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
if (pair == NULL)
return (0);
dbda.dbda_bmarks = bmarks;
dbda.dbda_errors = errors;
dbda.dbda_success = fnvlist_alloc();
rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
ZFS_SPACE_CHECK_RESERVED);
fnvlist_free(dbda.dbda_success);
return (rv);
}
/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_redaction_list_long_held(redaction_list_t *rl)
{
return (!zfs_refcount_is_zero(&rl->rl_longholds));
}
void
dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl, void *tag)
{
ASSERT(dsl_pool_config_held(dp));
(void) zfs_refcount_add(&rl->rl_longholds, tag);
}
void
dsl_redaction_list_long_rele(redaction_list_t *rl, void *tag)
{
(void) zfs_refcount_remove(&rl->rl_longholds, tag);
}
-/* ARGSUSED */
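/*
 * Dbuf user-data eviction callback: free the in-core redaction_list_t
 * once its bonus dbuf is evicted.
 */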
static void
redaction_list_evict_sync(void *rlu)
{
redaction_list_t *rl = rlu;
zfs_refcount_destroy(&rl->rl_longholds);
kmem_free(rl, sizeof (redaction_list_t));
}
void
dsl_redaction_list_rele(redaction_list_t *rl, void *tag)
{
dmu_buf_rele(rl->rl_dbuf, tag);
}
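/*
 * Hold the redaction list stored in the bonus buffer of object "rlobj",
 * creating the in-core redaction_list_t as dbuf user data on first use
 * (or adopting the one another thread raced in ahead of us).
 */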
int
dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, void *tag,
redaction_list_t **rlp)
{
objset_t *mos = dp->dp_meta_objset;
dmu_buf_t *dbuf;
redaction_list_t *rl;
int err;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
if (err != 0)
return (err);
rl = dmu_buf_get_user(dbuf);
if (rl == NULL) {
redaction_list_t *winner = NULL;
rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
rl->rl_dbuf = dbuf;
rl->rl_object = rlobj;
rl->rl_phys = dbuf->db_data;
rl->rl_mos = dp->dp_meta_objset;
zfs_refcount_create(&rl->rl_longholds);
dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
&rl->rl_dbuf);
if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
kmem_free(rl, sizeof (*rl));
rl = winner;
}
}
*rlp = rl;
return (0);
}
/*
* Snapshot ds is being destroyed.
*
* Adjust the "freed_before_next" of any bookmarks between this snap
* and the previous snapshot, because their "next snapshot" is changing.
*
* If there are any bookmarks with HAS_FBN at this snapshot, remove
* their HAS_SNAP flag (note: there can be at most one snapshot of
* each filesystem at a given txg), and return B_TRUE. In this case
* the caller cannot remove the key in the deadlist at this TXG, because
* the HAS_FBN bookmarks require the key be there.
*
* Returns B_FALSE if there are no bookmarks with HAS_FBN at this
* snapshot's TXG. In this case the caller can remove the key in the
* deadlist at this TXG.
*/
boolean_t
dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_dataset_t *head, *next;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
/*
* Find the first bookmark that HAS_FBN at or after the
* previous snapshot.
*/
dsl_bookmark_node_t search = { 0 };
avl_index_t idx;
search.dbn_phys.zbm_creation_txg =
dsl_dataset_phys(ds)->ds_prev_snap_txg;
search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
/*
* The empty-string name can't be in the AVL, and it compares
* before any entries with this TXG.
*/
search.dbn_name = "";
VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
dsl_bookmark_node_t *dbn =
avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
/*
* Iterate over all bookmarks that are at or after the previous
* snapshot, and before this (being deleted) snapshot. Adjust
* their FBN based on their new next snapshot.
*/
for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
dsl_dataset_phys(ds)->ds_creation_txg;
dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
continue;
/*
* Increase our FBN by the amount of space that was live
* (referenced) at the time of this bookmark (i.e.
* birth <= zbm_creation_txg), and killed between this
* (being deleted) snapshot and the next snapshot (i.e.
* on the next snapshot's deadlist). (Space killed before
* this is already on our FBN.)
*/
uint64_t referenced, compressed, uncompressed;
dsl_deadlist_space_range(&next->ds_deadlist,
0, dbn->dbn_phys.zbm_creation_txg,
&referenced, &compressed, &uncompressed);
dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
referenced;
dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
compressed;
dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
uncompressed;
VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
dbn->dbn_name, sizeof (uint64_t),
sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
&dbn->dbn_phys, tx));
}
dsl_dataset_rele(next, FTAG);
/*
* There may be several bookmarks at this txg (the TXG of the
* snapshot being deleted). We need to clear the SNAPSHOT_EXISTS
* flag on all of them, and return TRUE if there is at least 1
* bookmark here with HAS_FBN (thus preventing the deadlist
* key from being removed).
*/
boolean_t rv = B_FALSE;
for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
dsl_dataset_phys(ds)->ds_creation_txg;
dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
ASSERT(!(dbn->dbn_phys.zbm_flags &
ZBM_FLAG_SNAPSHOT_EXISTS));
continue;
}
ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
dbn->dbn_name, sizeof (uint64_t),
sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
&dbn->dbn_phys, tx));
rv = B_TRUE;
}
dsl_dataset_rele(head, FTAG);
return (rv);
}
/*
* A snapshot is being created of this (head) dataset.
*
* We don't keep keys in the deadlist for the most recent snapshot, or any
* bookmarks at or after it, because there can't be any blocks on the
* deadlist in this range. Now that the most recent snapshot is after
* all bookmarks, we need to add these keys. Note that the caller always
* adds a key at the previous snapshot, so we only add keys for bookmarks
* after that.
*/
void
dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t last_key_added = UINT64_MAX;
for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
dsl_dataset_phys(ds)->ds_prev_snap_txg;
dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
ASSERT3U(creation_txg, <=, last_key_added);
/*
* Note, there may be multiple bookmarks at this TXG,
* and we only want to add the key for this TXG once.
* The ds_bookmarks AVL is sorted by TXG, so we will visit
* these bookmarks in sequence.
*/
if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
creation_txg != last_key_added) {
dsl_deadlist_add_key(&ds->ds_deadlist,
creation_txg, tx);
last_key_added = creation_txg;
}
}
}
/*
* The next snapshot of the origin dataset has changed, due to
* promote or clone swap. If there are any bookmarks at this dataset,
* we need to update their zbm_*_freed_before_next_snap to reflect this.
* The head dataset has the relevant bookmarks in ds_bookmarks.
*/
void
dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
/*
* Find the first bookmark that HAS_FBN at the origin snapshot.
*/
dsl_bookmark_node_t search = { 0 };
avl_index_t idx;
search.dbn_phys.zbm_creation_txg =
dsl_dataset_phys(origin)->ds_creation_txg;
search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
/*
* The empty-string name can't be in the AVL, and it compares
* before any entries with this TXG.
*/
search.dbn_name = "";
VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
dsl_bookmark_node_t *dbn =
avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
/*
* Iterate over all bookmarks that are at the origin txg.
* Adjust their FBN based on their new next snapshot.
*/
for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
dsl_dataset_phys(origin)->ds_creation_txg &&
(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
/*
* Bookmark is at the origin, therefore its
* "next dataset" is changing, so we need
* to reset its FBN by recomputing it in
* dsl_bookmark_set_phys().
*/
ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
dsl_dataset_phys(origin)->ds_guid);
ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
dsl_dataset_phys(origin)->ds_referenced_bytes);
ASSERT(dbn->dbn_phys.zbm_flags &
ZBM_FLAG_SNAPSHOT_EXISTS);
/*
* Save and restore the zbm_redaction_obj, which
* is zeroed by dsl_bookmark_set_phys().
*/
uint64_t redaction_obj =
dbn->dbn_phys.zbm_redaction_obj;
dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
dbn->dbn_name, sizeof (uint64_t),
sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
&dbn->dbn_phys, tx));
}
}
/*
* This block is no longer referenced by this (head) dataset.
*
* Adjust the FBN of any bookmarks that reference this block, whose "next"
* is the head dataset.
*/
-/* ARGSUSED */
void
dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
+ (void) tx;
+
/*
* Iterate over bookmarks whose "next" is the head dataset.
*/
for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
dsl_dataset_phys(ds)->ds_prev_snap_txg;
dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
/*
* If the block was live (referenced) at the time of this
* bookmark, add its space to the bookmark's FBN.
*/
if (bp->blk_birth <= dbn->dbn_phys.zbm_creation_txg &&
(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
mutex_enter(&dbn->dbn_lock);
dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
BP_GET_PSIZE(bp);
dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
BP_GET_UCSIZE(bp);
/*
* Changing the ZAP object here would be too
* expensive. Also, we may be called from the zio
* interrupt thread, which can't block on i/o.
* Therefore, we mark this bookmark as dirty and
* modify the ZAP once per txg, in
* dsl_bookmark_sync_done().
*/
dbn->dbn_dirty = B_TRUE;
mutex_exit(&dbn->dbn_lock);
}
}
}
void
dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
if (dsl_dataset_is_snapshot(ds))
return;
/*
* We only dirty bookmarks that are at or after the most recent
* snapshot. We can't create snapshots between
* dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
* don't need to look at any bookmarks before ds_prev_snap_txg.
*/
for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
dsl_dataset_phys(ds)->ds_prev_snap_txg;
dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
if (dbn->dbn_dirty) {
/*
* We only dirty nodes with HAS_FBN, therefore
* we can always use the current bookmark struct size.
*/
ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
VERIFY0(zap_update(dp->dp_meta_objset,
ds->ds_bookmarks_obj,
dbn->dbn_name, sizeof (uint64_t),
sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
&dbn->dbn_phys, tx));
dbn->dbn_dirty = B_FALSE;
}
}
#ifdef ZFS_DEBUG
for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
ASSERT(!dbn->dbn_dirty);
}
#endif
}
/*
* Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
*/
uint64_t
dsl_bookmark_latest_txg(dsl_dataset_t *ds)
{
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
if (dbn == NULL)
return (0);
return (dbn->dbn_phys.zbm_creation_txg);
}
/*
* Compare the redact_block_phys_t to the bookmark. If the last block in the
* redact_block_phys_t is before the bookmark, return -1. If the first block in
* the redact_block_phys_t is after the bookmark, return 1. Otherwise, the
* bookmark is inside the range of the redact_block_phys_t, and we return 0.
*/
static int
redact_block_zb_compare(redact_block_phys_t *first,
zbookmark_phys_t *second)
{
/*
* If the block_phys is for a previous object, or the last block in the
* block_phys is strictly before the block in the bookmark, the
* block_phys is earlier.
*/
if (first->rbp_object < second->zb_object ||
(first->rbp_object == second->zb_object &&
first->rbp_blkid + (redact_block_get_count(first) - 1) <
second->zb_blkid)) {
return (-1);
}
/*
* If the bookmark is for a previous object, or the block in the
* bookmark is strictly before the first block in the block_phys, the
* bookmark is earlier.
*/
if (first->rbp_object > second->zb_object ||
(first->rbp_object == second->zb_object &&
first->rbp_blkid > second->zb_blkid)) {
return (1);
}
return (0);
}
/*
* Traverse the redaction list in the provided object, and call the callback for
* each entry we find. Don't call the callback for any records before resume.
*/
int
dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
rl_traverse_callback_t cb, void *arg)
{
objset_t *mos = rl->rl_mos;
int err = 0;
if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
/*
* When we finish a send, we update the last object and offset
* to UINT64_MAX. If a send fails partway through, the last
* object and offset will have some other value, indicating how
* far the send got. The redaction list must be complete before
* it can be traversed, so return EINVAL if the last object and
* blkid are not set to UINT64_MAX.
*/
return (SET_ERROR(EINVAL));
}
/*
* This allows us to skip the binary search and resume checking logic
* below, if we're not resuming a redacted send.
*/
if (ZB_IS_ZERO(resume))
resume = NULL;
/*
* Binary search for the point to resume from.
*/
uint64_t maxidx = rl->rl_phys->rlp_num_entries - 1;
uint64_t minidx = 0;
while (resume != NULL && maxidx > minidx) {
redact_block_phys_t rbp = { 0 };
ASSERT3U(maxidx, >, minidx);
uint64_t mididx = minidx + ((maxidx - minidx) / 2);
err = dmu_read(mos, rl->rl_object, mididx * sizeof (rbp),
sizeof (rbp), &rbp, DMU_READ_NO_PREFETCH);
if (err != 0)
break;
int cmp = redact_block_zb_compare(&rbp, resume);
if (cmp == 0) {
minidx = mididx;
break;
} else if (cmp > 0) {
maxidx =
(mididx == minidx ? minidx : mididx - 1);
} else {
minidx = mididx + 1;
}
}
unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
redact_block_phys_t *buf = zio_data_buf_alloc(bufsize);
unsigned int entries_per_buf = bufsize / sizeof (redact_block_phys_t);
uint64_t start_block = minidx / entries_per_buf;
err = dmu_read(mos, rl->rl_object, start_block * bufsize, bufsize, buf,
DMU_READ_PREFETCH);
for (uint64_t curidx = minidx;
err == 0 && curidx < rl->rl_phys->rlp_num_entries;
curidx++) {
/*
* We read in the redaction list one block at a time. Once we
* finish with all the entries in a given block, we read in a
* new one. The predictive prefetcher will take care of any
* prefetching, and this code shouldn't be the bottleneck, so we
* don't need to do manual prefetching.
*/
if (curidx % entries_per_buf == 0) {
err = dmu_read(mos, rl->rl_object, curidx *
sizeof (*buf), bufsize, buf,
DMU_READ_PREFETCH);
if (err != 0)
break;
}
redact_block_phys_t *rb = &buf[curidx % entries_per_buf];
/*
* If resume is non-null, we should either not send the data, or
* null out resume so we don't have to keep doing these
* comparisons.
*/
if (resume != NULL) {
/*
* It is possible that after the binary search we got
* a record before the resume point. There are two cases
* where this can occur. If the record is the last
* redaction record and the resume point is after the
* end of the redacted data, curidx will be the last
* redaction record and the loop will end after this
* iteration. The second case is when the resume point
* falls between two redaction records; the binary
* search can return either the record before or after
* the resume point, and the record seen on the next
* iteration will be past the resume point.
*/
if (redact_block_zb_compare(rb, resume) < 0) {
ASSERT3U(curidx, ==, minidx);
continue;
} else {
/*
* If the place to resume is in the middle of
* the range described by this
* redact_block_phys, then modify the
* redact_block_phys in memory so we generate
* the right records.
*/
if (resume->zb_object == rb->rbp_object &&
resume->zb_blkid > rb->rbp_blkid) {
uint64_t diff = resume->zb_blkid -
rb->rbp_blkid;
rb->rbp_blkid = resume->zb_blkid;
redact_block_set_count(rb,
redact_block_get_count(rb) - diff);
}
resume = NULL;
}
}
if (cb(rb, arg) != 0) {
err = EINTR;
break;
}
}
zio_data_buf_free(buf, bufsize);
return (err);
}
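dsl_redaction_list_traverse() treats the redaction list object as a flat array of fixed-size redact_block_phys_t records, so finding the resume point is an ordinary binary search driven by the three-way range comparator above. Below is a stand-alone sketch of that search over an in-memory array, included only as an illustration; range_t, range_cmp, and range_search are invented names, and the real code reads each probe from the MOS with dmu_read() instead of indexing memory.
#include <stdint.h>
typedef struct range {
	uint64_t r_start; /* first key covered by this record */
	uint64_t r_count; /* number of consecutive keys covered */
} range_t;
/* -1: range ends before key; 1: range starts after key; 0: key is inside the range. */
static int
range_cmp(const range_t *r, uint64_t key)
{
	if (r->r_start + (r->r_count - 1) < key)
		return (-1);
	if (r->r_start > key)
		return (1);
	return (0);
}
/*
 * Mirror of the minidx/maxidx loop: returns the index of the record containing
 * 'key', or of a neighboring record when no record contains it. Assumes the
 * array is sorted, non-overlapping, and has at least one entry.
 */
static uint64_t
range_search(const range_t *ranges, uint64_t nranges, uint64_t key)
{
	uint64_t minidx = 0, maxidx = nranges - 1;
	while (maxidx > minidx) {
		uint64_t mididx = minidx + (maxidx - minidx) / 2;
		int cmp = range_cmp(&ranges[mididx], key);
		if (cmp == 0)
			return (mididx);
		else if (cmp > 0)
			maxidx = (mididx == minidx ? minidx : mididx - 1);
		else
			minidx = mididx + 1;
	}
	return (minidx);
}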
diff --git a/sys/contrib/openzfs/module/zfs/dsl_crypt.c b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
index 26d4c2fe7e33..1ea184de338c 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_crypt.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_crypt.c
@@ -1,2868 +1,2861 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017, Datto, Inc. All rights reserved.
* Copyright (c) 2018 by Delphix. All rights reserved.
*/
#include <sys/dsl_crypt.h>
#include <sys/dsl_pool.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/spa_impl.h>
#include <sys/dmu_objset.h>
#include <sys/zvol.h>
/*
* This file's primary purpose is for managing master encryption keys in
* memory and on disk. For more info on how these keys are used, see the
* block comment in zio_crypt.c.
*
* All master keys are stored encrypted on disk in the form of the DSL
* Crypto Key ZAP object. The binary key data in this object is always
* randomly generated and is encrypted with the user's wrapping key. This
* layer of indirection allows the user to change their key without
* needing to re-encrypt the entire dataset. The ZAP also holds on to the
* (non-encrypted) encryption algorithm identifier, IV, and MAC needed to
* safely decrypt the master key. For more info on the user's key see the
* block comment in libzfs_crypto.c
*
* In-memory encryption keys are managed through the spa_keystore. The
* keystore consists of 3 AVL trees, which are as follows:
*
* The Wrapping Key Tree:
* The wrapping key (wkey) tree stores the user's keys that are fed into the
* kernel through 'zfs load-key' and related commands. Datasets inherit their
* parent's wkey by default, so these structures are refcounted. The wrapping
* keys remain in memory until they are explicitly unloaded (with
* "zfs unload-key"). Unloading is only possible when no datasets are using
* them (refcount=0).
*
* The DSL Crypto Key Tree:
* The DSL Crypto Keys (DCK) are the in-memory representation of decrypted
* master keys. They are used by the functions in zio_crypt.c to perform
* encryption, decryption, and authentication. Snapshots and clones of a given
* dataset will share a DSL Crypto Key, so they are also refcounted. Once the
* refcount on a key hits zero, it is immediately zeroed out and freed.
*
* The Crypto Key Mapping Tree:
* The zio layer needs to lookup master keys by their dataset object id. Since
* the DSL Crypto Keys can belong to multiple datasets, we maintain a tree of
* dsl_key_mapping_t's which essentially just map the dataset object id to its
* appropriate DSL Crypto Key. The management for creating and destroying these
* mappings hooks into the code for owning and disowning datasets. Usually,
* there will only be one active dataset owner, but there are times
* (particularly during dataset creation and destruction) when this may not be
* true or the dataset may not be initialized enough to own. As a result, this
* object is also refcounted.
*/
/*
* This tunable allows datasets to be raw received even if the stream does
* not include IVset guids or if the guids don't match. This is used as part
* of the resolution for ZPOOL_ERRATA_ZOL_8308_ENCRYPTION.
*/
int zfs_disable_ivset_guid_check = 0;
static void
dsl_wrapping_key_hold(dsl_wrapping_key_t *wkey, void *tag)
{
(void) zfs_refcount_add(&wkey->wk_refcnt, tag);
}
static void
dsl_wrapping_key_rele(dsl_wrapping_key_t *wkey, void *tag)
{
(void) zfs_refcount_remove(&wkey->wk_refcnt, tag);
}
static void
dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
{
ASSERT0(zfs_refcount_count(&wkey->wk_refcnt));
if (wkey->wk_key.ck_data) {
bzero(wkey->wk_key.ck_data,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
kmem_free(wkey->wk_key.ck_data,
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
}
zfs_refcount_destroy(&wkey->wk_refcnt);
kmem_free(wkey, sizeof (dsl_wrapping_key_t));
}
static void
dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat,
uint64_t salt, uint64_t iters, dsl_wrapping_key_t **wkey_out)
{
dsl_wrapping_key_t *wkey;
/* allocate the wrapping key */
wkey = kmem_alloc(sizeof (dsl_wrapping_key_t), KM_SLEEP);
/* allocate and initialize the underlying crypto key */
wkey->wk_key.ck_data = kmem_alloc(WRAPPING_KEY_LEN, KM_SLEEP);
wkey->wk_key.ck_format = CRYPTO_KEY_RAW;
wkey->wk_key.ck_length = CRYPTO_BYTES2BITS(WRAPPING_KEY_LEN);
bcopy(wkeydata, wkey->wk_key.ck_data, WRAPPING_KEY_LEN);
/* initialize the rest of the struct */
zfs_refcount_create(&wkey->wk_refcnt);
wkey->wk_keyformat = keyformat;
wkey->wk_salt = salt;
wkey->wk_iters = iters;
*wkey_out = wkey;
}
int
dsl_crypto_params_create_nvlist(dcp_cmd_t cmd, nvlist_t *props,
nvlist_t *crypto_args, dsl_crypto_params_t **dcp_out)
{
int ret;
uint64_t crypt = ZIO_CRYPT_INHERIT;
uint64_t keyformat = ZFS_KEYFORMAT_NONE;
uint64_t salt = 0, iters = 0;
dsl_crypto_params_t *dcp = NULL;
dsl_wrapping_key_t *wkey = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeydata_len = 0;
char *keylocation = NULL;
dcp = kmem_zalloc(sizeof (dsl_crypto_params_t), KM_SLEEP);
dcp->cp_cmd = cmd;
/* get relevant arguments from the nvlists */
if (props != NULL) {
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), &crypt);
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), &keyformat);
(void) nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), &salt);
(void) nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), &iters);
dcp->cp_crypt = crypt;
}
if (crypto_args != NULL) {
(void) nvlist_lookup_uint8_array(crypto_args, "wkeydata",
&wkeydata, &wkeydata_len);
}
/* check for valid command */
if (dcp->cp_cmd >= DCP_CMD_MAX) {
ret = SET_ERROR(EINVAL);
goto error;
} else {
dcp->cp_cmd = cmd;
}
/* check for valid crypt */
if (dcp->cp_crypt >= ZIO_CRYPT_FUNCTIONS) {
ret = SET_ERROR(EINVAL);
goto error;
} else {
dcp->cp_crypt = crypt;
}
/* check for valid keyformat */
if (keyformat >= ZFS_KEYFORMAT_FORMATS) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check for a valid keylocation (of any kind) and copy it in */
if (keylocation != NULL) {
if (!zfs_prop_valid_keylocation(keylocation, B_FALSE)) {
ret = SET_ERROR(EINVAL);
goto error;
}
dcp->cp_keylocation = spa_strdup(keylocation);
}
/* check wrapping key length, if given */
if (wkeydata != NULL && wkeydata_len != WRAPPING_KEY_LEN) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* if the user asked for the default crypt, determine that now */
if (dcp->cp_crypt == ZIO_CRYPT_ON)
dcp->cp_crypt = ZIO_CRYPT_ON_VALUE;
/* create the wrapping key from the raw data */
if (wkeydata != NULL) {
/* create the wrapping key with the verified parameters */
dsl_wrapping_key_create(wkeydata, keyformat, salt,
iters, &wkey);
dcp->cp_wkey = wkey;
}
/*
* Remove the encryption properties from the nvlist since they are not
* maintained through the DSL.
*/
(void) nvlist_remove_all(props, zfs_prop_to_name(ZFS_PROP_ENCRYPTION));
(void) nvlist_remove_all(props, zfs_prop_to_name(ZFS_PROP_KEYFORMAT));
(void) nvlist_remove_all(props, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT));
(void) nvlist_remove_all(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS));
*dcp_out = dcp;
return (0);
error:
kmem_free(dcp, sizeof (dsl_crypto_params_t));
*dcp_out = NULL;
return (ret);
}
void
dsl_crypto_params_free(dsl_crypto_params_t *dcp, boolean_t unload)
{
if (dcp == NULL)
return;
if (dcp->cp_keylocation != NULL)
spa_strfree(dcp->cp_keylocation);
if (unload && dcp->cp_wkey != NULL)
dsl_wrapping_key_free(dcp->cp_wkey);
kmem_free(dcp, sizeof (dsl_crypto_params_t));
}
static int
spa_crypto_key_compare(const void *a, const void *b)
{
const dsl_crypto_key_t *dcka = a;
const dsl_crypto_key_t *dckb = b;
if (dcka->dck_obj < dckb->dck_obj)
return (-1);
if (dcka->dck_obj > dckb->dck_obj)
return (1);
return (0);
}
static int
spa_key_mapping_compare(const void *a, const void *b)
{
const dsl_key_mapping_t *kma = a;
const dsl_key_mapping_t *kmb = b;
if (kma->km_dsobj < kmb->km_dsobj)
return (-1);
if (kma->km_dsobj > kmb->km_dsobj)
return (1);
return (0);
}
static int
spa_wkey_compare(const void *a, const void *b)
{
const dsl_wrapping_key_t *wka = a;
const dsl_wrapping_key_t *wkb = b;
if (wka->wk_ddobj < wkb->wk_ddobj)
return (-1);
if (wka->wk_ddobj > wkb->wk_ddobj)
return (1);
return (0);
}
void
spa_keystore_init(spa_keystore_t *sk)
{
rw_init(&sk->sk_dk_lock, NULL, RW_DEFAULT, NULL);
rw_init(&sk->sk_km_lock, NULL, RW_DEFAULT, NULL);
rw_init(&sk->sk_wkeys_lock, NULL, RW_DEFAULT, NULL);
avl_create(&sk->sk_dsl_keys, spa_crypto_key_compare,
sizeof (dsl_crypto_key_t),
offsetof(dsl_crypto_key_t, dck_avl_link));
avl_create(&sk->sk_key_mappings, spa_key_mapping_compare,
sizeof (dsl_key_mapping_t),
offsetof(dsl_key_mapping_t, km_avl_link));
avl_create(&sk->sk_wkeys, spa_wkey_compare, sizeof (dsl_wrapping_key_t),
offsetof(dsl_wrapping_key_t, wk_avl_link));
}
void
spa_keystore_fini(spa_keystore_t *sk)
{
dsl_wrapping_key_t *wkey;
void *cookie = NULL;
ASSERT(avl_is_empty(&sk->sk_dsl_keys));
ASSERT(avl_is_empty(&sk->sk_key_mappings));
while ((wkey = avl_destroy_nodes(&sk->sk_wkeys, &cookie)) != NULL)
dsl_wrapping_key_free(wkey);
avl_destroy(&sk->sk_wkeys);
avl_destroy(&sk->sk_key_mappings);
avl_destroy(&sk->sk_dsl_keys);
rw_destroy(&sk->sk_wkeys_lock);
rw_destroy(&sk->sk_km_lock);
rw_destroy(&sk->sk_dk_lock);
}
static int
dsl_dir_get_encryption_root_ddobj(dsl_dir_t *dd, uint64_t *rddobj)
{
if (dd->dd_crypto_obj == 0)
return (SET_ERROR(ENOENT));
return (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1, rddobj));
}
static int
dsl_dir_get_encryption_version(dsl_dir_t *dd, uint64_t *version)
{
*version = 0;
if (dd->dd_crypto_obj == 0)
return (SET_ERROR(ENOENT));
/* version 0 is implied by ENOENT */
(void) zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_VERSION, 8, 1, version);
return (0);
}
boolean_t
dsl_dir_incompatible_encryption_version(dsl_dir_t *dd)
{
int ret;
uint64_t version = 0;
ret = dsl_dir_get_encryption_version(dd, &version);
if (ret != 0)
return (B_FALSE);
return (version != ZIO_CRYPT_KEY_CURRENT_VERSION);
}
static int
spa_keystore_wkey_hold_ddobj_impl(spa_t *spa, uint64_t ddobj,
void *tag, dsl_wrapping_key_t **wkey_out)
{
int ret;
dsl_wrapping_key_t search_wkey;
dsl_wrapping_key_t *found_wkey;
ASSERT(RW_LOCK_HELD(&spa->spa_keystore.sk_wkeys_lock));
/* init the search wrapping key */
search_wkey.wk_ddobj = ddobj;
/* lookup the wrapping key */
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, &search_wkey, NULL);
if (!found_wkey) {
ret = SET_ERROR(ENOENT);
goto error;
}
/* increment the refcount */
dsl_wrapping_key_hold(found_wkey, tag);
*wkey_out = found_wkey;
return (0);
error:
*wkey_out = NULL;
return (ret);
}
static int
spa_keystore_wkey_hold_dd(spa_t *spa, dsl_dir_t *dd, void *tag,
dsl_wrapping_key_t **wkey_out)
{
int ret;
dsl_wrapping_key_t *wkey;
uint64_t rddobj;
boolean_t locked = B_FALSE;
if (!RW_WRITE_HELD(&spa->spa_keystore.sk_wkeys_lock)) {
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_READER);
locked = B_TRUE;
}
/* get the ddobj that the keylocation property was inherited from */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0)
goto error;
/* lookup the wkey in the avl tree */
ret = spa_keystore_wkey_hold_ddobj_impl(spa, rddobj, tag, &wkey);
if (ret != 0)
goto error;
/* unlock the wkey tree if we locked it */
if (locked)
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
*wkey_out = wkey;
return (0);
error:
if (locked)
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
*wkey_out = NULL;
return (ret);
}
int
dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation)
{
int ret = 0;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
uint64_t rddobj;
/* hold the dsl dir */
ret = dsl_pool_hold(dsname, FTAG, &dp);
if (ret != 0)
goto out;
ret = dsl_dir_hold(dp, dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto out;
}
/* if dd is not encrypted, the value may only be "none" */
if (dd->dd_crypto_obj == 0) {
if (strcmp(keylocation, "none") != 0) {
ret = SET_ERROR(EACCES);
goto out;
}
ret = 0;
goto out;
}
/* check for a valid keylocation for encrypted datasets */
if (!zfs_prop_valid_keylocation(keylocation, B_TRUE)) {
ret = SET_ERROR(EINVAL);
goto out;
}
/* check that this is an encryption root */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0)
goto out;
if (rddobj != dd->dd_object) {
ret = SET_ERROR(EACCES);
goto out;
}
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
return (0);
out:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
return (ret);
}
static void
dsl_crypto_key_free(dsl_crypto_key_t *dck)
{
ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);
/* destroy the zio_crypt_key_t */
zio_crypt_key_destroy(&dck->dck_key);
/* free the refcount, wrapping key, and lock */
zfs_refcount_destroy(&dck->dck_holds);
if (dck->dck_wkey)
dsl_wrapping_key_rele(dck->dck_wkey, dck);
/* free the key */
kmem_free(dck, sizeof (dsl_crypto_key_t));
}
static void
dsl_crypto_key_rele(dsl_crypto_key_t *dck, void *tag)
{
if (zfs_refcount_remove(&dck->dck_holds, tag) == 0)
dsl_crypto_key_free(dck);
}
static int
dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
uint64_t dckobj, void *tag, dsl_crypto_key_t **dck_out)
{
int ret;
uint64_t crypt = 0, guid = 0, version = 0;
uint8_t raw_keydata[MASTER_KEY_MAX_LEN];
uint8_t raw_hmac_keydata[SHA512_HMAC_KEYLEN];
uint8_t iv[WRAPPING_IV_LEN];
uint8_t mac[WRAPPING_MAC_LEN];
dsl_crypto_key_t *dck;
/* allocate and initialize the key */
dck = kmem_zalloc(sizeof (dsl_crypto_key_t), KM_SLEEP);
/* fetch all of the values we need from the ZAP */
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_GUID, 8, 1, &guid);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MASTER_KEY, 1,
MASTER_KEY_MAX_LEN, raw_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_HMAC_KEY, 1,
SHA512_HMAC_KEYLEN, raw_hmac_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_IV, 1, WRAPPING_IV_LEN,
iv);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MAC, 1, WRAPPING_MAC_LEN,
mac);
if (ret != 0)
goto error;
/* the initial on-disk format for encryption did not have a version */
(void) zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_VERSION, 8, 1, &version);
/*
* Unwrap the keys. If there is an error return EACCES to indicate
* an authentication failure.
*/
ret = zio_crypt_key_unwrap(&wkey->wk_key, crypt, version, guid,
raw_keydata, raw_hmac_keydata, iv, mac, &dck->dck_key);
if (ret != 0) {
ret = SET_ERROR(EACCES);
goto error;
}
/* finish initializing the dsl_crypto_key_t */
zfs_refcount_create(&dck->dck_holds);
dsl_wrapping_key_hold(wkey, dck);
dck->dck_wkey = wkey;
dck->dck_obj = dckobj;
zfs_refcount_add(&dck->dck_holds, tag);
*dck_out = dck;
return (0);
error:
if (dck != NULL) {
bzero(dck, sizeof (dsl_crypto_key_t));
kmem_free(dck, sizeof (dsl_crypto_key_t));
}
*dck_out = NULL;
return (ret);
}
static int
spa_keystore_dsl_key_hold_impl(spa_t *spa, uint64_t dckobj, void *tag,
dsl_crypto_key_t **dck_out)
{
int ret;
dsl_crypto_key_t search_dck;
dsl_crypto_key_t *found_dck;
ASSERT(RW_LOCK_HELD(&spa->spa_keystore.sk_dk_lock));
/* init the search key */
search_dck.dck_obj = dckobj;
/* find the matching key in the keystore */
found_dck = avl_find(&spa->spa_keystore.sk_dsl_keys, &search_dck, NULL);
if (!found_dck) {
ret = SET_ERROR(ENOENT);
goto error;
}
/* increment the refcount */
zfs_refcount_add(&found_dck->dck_holds, tag);
*dck_out = found_dck;
return (0);
error:
*dck_out = NULL;
return (ret);
}
static int
spa_keystore_dsl_key_hold_dd(spa_t *spa, dsl_dir_t *dd, void *tag,
dsl_crypto_key_t **dck_out)
{
int ret;
avl_index_t where;
dsl_crypto_key_t *dck_io = NULL, *dck_ks = NULL;
dsl_wrapping_key_t *wkey = NULL;
uint64_t dckobj = dd->dd_crypto_obj;
/* Lookup the key in the tree of currently loaded keys */
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_READER);
ret = spa_keystore_dsl_key_hold_impl(spa, dckobj, tag, &dck_ks);
rw_exit(&spa->spa_keystore.sk_dk_lock);
if (ret == 0) {
*dck_out = dck_ks;
return (0);
}
/* Lookup the wrapping key from the keystore */
ret = spa_keystore_wkey_hold_dd(spa, dd, FTAG, &wkey);
if (ret != 0) {
*dck_out = NULL;
return (SET_ERROR(EACCES));
}
/* Read the key from disk */
ret = dsl_crypto_key_open(spa->spa_meta_objset, wkey, dckobj,
tag, &dck_io);
if (ret != 0) {
dsl_wrapping_key_rele(wkey, FTAG);
*dck_out = NULL;
return (ret);
}
/*
* Add the key to the keystore. It may already exist if it was
* added while performing the read from disk. In this case discard
* it and return the key from the keystore.
*/
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_WRITER);
ret = spa_keystore_dsl_key_hold_impl(spa, dckobj, tag, &dck_ks);
if (ret != 0) {
avl_find(&spa->spa_keystore.sk_dsl_keys, dck_io, &where);
avl_insert(&spa->spa_keystore.sk_dsl_keys, dck_io, where);
*dck_out = dck_io;
} else {
dsl_crypto_key_free(dck_io);
*dck_out = dck_ks;
}
/* Release the wrapping key (the dsl key now has a reference to it) */
dsl_wrapping_key_rele(wkey, FTAG);
rw_exit(&spa->spa_keystore.sk_dk_lock);
return (0);
}
void
spa_keystore_dsl_key_rele(spa_t *spa, dsl_crypto_key_t *dck, void *tag)
{
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_WRITER);
if (zfs_refcount_remove(&dck->dck_holds, tag) == 0) {
avl_remove(&spa->spa_keystore.sk_dsl_keys, dck);
dsl_crypto_key_free(dck);
}
rw_exit(&spa->spa_keystore.sk_dk_lock);
}
int
spa_keystore_load_wkey_impl(spa_t *spa, dsl_wrapping_key_t *wkey)
{
int ret;
avl_index_t where;
dsl_wrapping_key_t *found_wkey;
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
/* insert the wrapping key into the keystore */
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, wkey, &where);
if (found_wkey != NULL) {
ret = SET_ERROR(EEXIST);
goto error_unlock;
}
avl_insert(&spa->spa_keystore.sk_wkeys, wkey, where);
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
return (ret);
}
int
spa_keystore_load_wkey(const char *dsname, dsl_crypto_params_t *dcp,
boolean_t noop)
{
int ret;
dsl_dir_t *dd = NULL;
dsl_crypto_key_t *dck = NULL;
dsl_wrapping_key_t *wkey = dcp->cp_wkey;
dsl_pool_t *dp = NULL;
uint64_t rddobj, keyformat, salt, iters;
/*
* We don't validate the wrapping key's keyformat, salt, or iters
* since they will never be needed after the DCK has been wrapped.
*/
if (dcp->cp_wkey == NULL ||
dcp->cp_cmd != DCP_CMD_NONE ||
dcp->cp_crypt != ZIO_CRYPT_INHERIT ||
dcp->cp_keylocation != NULL)
return (SET_ERROR(EINVAL));
ret = dsl_pool_hold(dsname, FTAG, &dp);
if (ret != 0)
goto error;
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) {
ret = SET_ERROR(ENOTSUP);
goto error;
}
/* hold the dsl dir */
ret = dsl_dir_hold(dp, dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto error;
}
/* confirm that dd is the encryption root */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0 || rddobj != dd->dd_object) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* initialize the wkey's ddobj */
wkey->wk_ddobj = dd->dd_object;
/* verify that the wkey is correct by opening its dsl key */
ret = dsl_crypto_key_open(dp->dp_meta_objset, wkey,
dd->dd_crypto_obj, FTAG, &dck);
if (ret != 0)
goto error;
/* initialize the wkey encryption parameters from the DSL Crypto Key */
ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &keyformat);
if (ret != 0)
goto error;
ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &salt);
if (ret != 0)
goto error;
ret = zap_lookup(dp->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &iters);
if (ret != 0)
goto error;
ASSERT3U(keyformat, <, ZFS_KEYFORMAT_FORMATS);
ASSERT3U(keyformat, !=, ZFS_KEYFORMAT_NONE);
IMPLY(keyformat == ZFS_KEYFORMAT_PASSPHRASE, iters != 0);
IMPLY(keyformat == ZFS_KEYFORMAT_PASSPHRASE, salt != 0);
IMPLY(keyformat != ZFS_KEYFORMAT_PASSPHRASE, iters == 0);
IMPLY(keyformat != ZFS_KEYFORMAT_PASSPHRASE, salt == 0);
wkey->wk_keyformat = keyformat;
wkey->wk_salt = salt;
wkey->wk_iters = iters;
/*
* At this point we have verified the wkey and confirmed that it can
* be used to decrypt a DSL Crypto Key. We can simply cleanup and
* return if this is all the user wanted to do.
*/
if (noop)
goto error;
/* insert the wrapping key into the keystore */
ret = spa_keystore_load_wkey_impl(dp->dp_spa, wkey);
if (ret != 0)
goto error;
dsl_crypto_key_rele(dck, FTAG);
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
/* create any zvols under this ds */
zvol_create_minors_recursive(dsname);
return (0);
error:
if (dck != NULL)
dsl_crypto_key_rele(dck, FTAG);
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
return (ret);
}
int
spa_keystore_unload_wkey_impl(spa_t *spa, uint64_t ddobj)
{
int ret;
dsl_wrapping_key_t search_wkey;
dsl_wrapping_key_t *found_wkey;
/* init the search wrapping key */
search_wkey.wk_ddobj = ddobj;
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
/* remove the wrapping key from the keystore */
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys,
&search_wkey, NULL);
if (!found_wkey) {
ret = SET_ERROR(EACCES);
goto error_unlock;
} else if (zfs_refcount_count(&found_wkey->wk_refcnt) != 0) {
ret = SET_ERROR(EBUSY);
goto error_unlock;
}
avl_remove(&spa->spa_keystore.sk_wkeys, found_wkey);
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
/* free the wrapping key */
dsl_wrapping_key_free(found_wkey);
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
return (ret);
}
int
spa_keystore_unload_wkey(const char *dsname)
{
int ret = 0;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = NULL;
spa_t *spa = NULL;
ret = spa_open(dsname, &spa, FTAG);
if (ret != 0)
return (ret);
/*
* Wait for any outstanding txg IO to complete, releasing any
* remaining references on the wkey.
*/
if (spa_mode(spa) != SPA_MODE_READ)
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_close(spa, FTAG);
/* hold the dsl dir */
ret = dsl_pool_hold(dsname, FTAG, &dp);
if (ret != 0)
goto error;
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) {
ret = (SET_ERROR(ENOTSUP));
goto error;
}
ret = dsl_dir_hold(dp, dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto error;
}
/* unload the wkey */
ret = spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
if (ret != 0)
goto error;
dsl_dir_rele(dd, FTAG);
dsl_pool_rele(dp, FTAG);
/* remove any zvols under this ds */
zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
return (0);
error:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
if (dp != NULL)
dsl_pool_rele(dp, FTAG);
return (ret);
}
void
key_mapping_add_ref(dsl_key_mapping_t *km, void *tag)
{
ASSERT3U(zfs_refcount_count(&km->km_refcnt), >=, 1);
zfs_refcount_add(&km->km_refcnt, tag);
}
/*
* The locking here is a little tricky to ensure we don't cause unnecessary
* performance problems. We want to release a key mapping whenever someone
* decrements the refcount to 0, but freeing the mapping requires removing
* it from the spa_keystore, which requires holding sk_km_lock as a writer.
* Most of the time we don't want to hold this lock as a writer, since the
* same lock is held as a reader for each IO that needs to encrypt / decrypt
* data for any dataset and in practice we will only actually free the
* mapping after unmounting a dataset.
*/
void
key_mapping_rele(spa_t *spa, dsl_key_mapping_t *km, void *tag)
{
ASSERT3U(zfs_refcount_count(&km->km_refcnt), >=, 1);
if (zfs_refcount_remove(&km->km_refcnt, tag) != 0)
return;
/*
* We think we are going to need to free the mapping. Add a
* reference to prevent most other releasers from thinking
* this might be their responsibility. This is inherently
* racy, so we will confirm that we are legitimately the
* last holder once we have the sk_km_lock as a writer.
*/
zfs_refcount_add(&km->km_refcnt, FTAG);
rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER);
if (zfs_refcount_remove(&km->km_refcnt, FTAG) != 0) {
rw_exit(&spa->spa_keystore.sk_km_lock);
return;
}
avl_remove(&spa->spa_keystore.sk_key_mappings, km);
rw_exit(&spa->spa_keystore.sk_km_lock);
spa_keystore_dsl_key_rele(spa, km->km_key, km);
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
}
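The comment above describes the essential trick in key_mapping_rele(): the common case only drops a refcount, and the writer lock is taken, with the count re-checked, only by the thread that observed the count reach zero. A self-contained sketch of that shape follows; it is illustrative only (obj_t, obj_table_lock, and obj_table_remove are invented stand-ins for the key mapping, sk_km_lock, and the AVL removal).
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
typedef struct obj {
	atomic_uint o_ref;
} obj_t;
static pthread_rwlock_t obj_table_lock = PTHREAD_RWLOCK_INITIALIZER;
static void
obj_table_remove(obj_t *obj)
{
	(void) obj; /* stub: the real code unlinks the node from an AVL tree */
}
static void
obj_rele(obj_t *obj)
{
	/* Common case: just drop our reference; no writer lock needed. */
	if (atomic_fetch_sub(&obj->o_ref, 1) - 1 != 0)
		return;
	/*
	 * We saw the count hit zero, so freeing is our responsibility.
	 * Take a temporary reference so racing releasers back off, then
	 * confirm we are still the last holder under the writer lock
	 * before unlinking and freeing the object.
	 */
	atomic_fetch_add(&obj->o_ref, 1);
	pthread_rwlock_wrlock(&obj_table_lock);
	if (atomic_fetch_sub(&obj->o_ref, 1) - 1 != 0) {
		pthread_rwlock_unlock(&obj_table_lock);
		return;
	}
	obj_table_remove(obj);
	pthread_rwlock_unlock(&obj_table_lock);
	free(obj);
}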
int
spa_keystore_create_mapping(spa_t *spa, dsl_dataset_t *ds, void *tag,
dsl_key_mapping_t **km_out)
{
int ret;
avl_index_t where;
dsl_key_mapping_t *km, *found_km;
boolean_t should_free = B_FALSE;
/* Allocate and initialize the mapping */
km = kmem_zalloc(sizeof (dsl_key_mapping_t), KM_SLEEP);
zfs_refcount_create(&km->km_refcnt);
ret = spa_keystore_dsl_key_hold_dd(spa, ds->ds_dir, km, &km->km_key);
if (ret != 0) {
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
if (km_out != NULL)
*km_out = NULL;
return (ret);
}
km->km_dsobj = ds->ds_object;
rw_enter(&spa->spa_keystore.sk_km_lock, RW_WRITER);
/*
* If a mapping already exists, simply increment its refcount and
* cleanup the one we made. We want to allocate / free outside of
* the lock because this lock is also used by the zio layer to lookup
* key mappings. Otherwise, use the one we created. Normally, there will
* only be one active reference at a time (the objset owner), but there
* are times when there could be multiple async users.
*/
found_km = avl_find(&spa->spa_keystore.sk_key_mappings, km, &where);
if (found_km != NULL) {
should_free = B_TRUE;
zfs_refcount_add(&found_km->km_refcnt, tag);
if (km_out != NULL)
*km_out = found_km;
} else {
zfs_refcount_add(&km->km_refcnt, tag);
avl_insert(&spa->spa_keystore.sk_key_mappings, km, where);
if (km_out != NULL)
*km_out = km;
}
rw_exit(&spa->spa_keystore.sk_km_lock);
if (should_free) {
spa_keystore_dsl_key_rele(spa, km->km_key, km);
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
}
return (0);
}
int
spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, void *tag)
{
int ret;
dsl_key_mapping_t search_km;
dsl_key_mapping_t *found_km;
/* init the search key mapping */
search_km.km_dsobj = dsobj;
rw_enter(&spa->spa_keystore.sk_km_lock, RW_READER);
/* find the matching mapping */
found_km = avl_find(&spa->spa_keystore.sk_key_mappings,
&search_km, NULL);
if (found_km == NULL) {
ret = SET_ERROR(ENOENT);
goto error_unlock;
}
rw_exit(&spa->spa_keystore.sk_km_lock);
key_mapping_rele(spa, found_km, tag);
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_km_lock);
return (ret);
}
/*
* This function is primarily used by the zio and arc layer to lookup
* DSL Crypto Keys for encryption. Callers must release the key with
* spa_keystore_dsl_key_rele(). The function may also be called with
* dck_out == NULL and tag == NULL to simply check that a key exists
* without getting a reference to it.
*/
int
spa_keystore_lookup_key(spa_t *spa, uint64_t dsobj, void *tag,
dsl_crypto_key_t **dck_out)
{
int ret;
dsl_key_mapping_t search_km;
dsl_key_mapping_t *found_km;
ASSERT((tag != NULL && dck_out != NULL) ||
(tag == NULL && dck_out == NULL));
/* init the search key mapping */
search_km.km_dsobj = dsobj;
rw_enter(&spa->spa_keystore.sk_km_lock, RW_READER);
/* find the matching mapping in the tree */
found_km = avl_find(&spa->spa_keystore.sk_key_mappings, &search_km,
NULL);
if (found_km == NULL) {
ret = SET_ERROR(ENOENT);
goto error_unlock;
}
if (found_km && tag)
zfs_refcount_add(&found_km->km_key->dck_holds, tag);
rw_exit(&spa->spa_keystore.sk_km_lock);
if (dck_out != NULL)
*dck_out = found_km->km_key;
return (0);
error_unlock:
rw_exit(&spa->spa_keystore.sk_km_lock);
if (dck_out != NULL)
*dck_out = NULL;
return (ret);
}
static int
dmu_objset_check_wkey_loaded(dsl_dir_t *dd)
{
int ret;
dsl_wrapping_key_t *wkey = NULL;
ret = spa_keystore_wkey_hold_dd(dd->dd_pool->dp_spa, dd, FTAG,
&wkey);
if (ret != 0)
return (SET_ERROR(EACCES));
dsl_wrapping_key_rele(wkey, FTAG);
return (0);
}
static zfs_keystatus_t
dsl_dataset_get_keystatus(dsl_dir_t *dd)
{
/* check if this dd has a dsl key */
if (dd->dd_crypto_obj == 0)
return (ZFS_KEYSTATUS_NONE);
return (dmu_objset_check_wkey_loaded(dd) == 0 ?
ZFS_KEYSTATUS_AVAILABLE : ZFS_KEYSTATUS_UNAVAILABLE);
}
static int
dsl_dir_get_crypt(dsl_dir_t *dd, uint64_t *crypt)
{
if (dd->dd_crypto_obj == 0) {
*crypt = ZIO_CRYPT_OFF;
return (0);
}
return (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1, crypt));
}
static void
dsl_crypto_key_sync_impl(objset_t *mos, uint64_t dckobj, uint64_t crypt,
uint64_t root_ddobj, uint64_t guid, uint8_t *iv, uint8_t *mac,
uint8_t *keydata, uint8_t *hmac_keydata, uint64_t keyformat,
uint64_t salt, uint64_t iters, dmu_tx_t *tx)
{
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1,
&root_ddobj, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_GUID, 8, 1,
&guid, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_IV, 1, WRAPPING_IV_LEN,
iv, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_MAC, 1, WRAPPING_MAC_LEN,
mac, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_MASTER_KEY, 1,
MASTER_KEY_MAX_LEN, keydata, tx));
VERIFY0(zap_update(mos, dckobj, DSL_CRYPTO_KEY_HMAC_KEY, 1,
SHA512_HMAC_KEYLEN, hmac_keydata, tx));
VERIFY0(zap_update(mos, dckobj, zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
8, 1, &keyformat, tx));
VERIFY0(zap_update(mos, dckobj, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
8, 1, &salt, tx));
VERIFY0(zap_update(mos, dckobj, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
8, 1, &iters, tx));
}
static void
dsl_crypto_key_sync(dsl_crypto_key_t *dck, dmu_tx_t *tx)
{
zio_crypt_key_t *key = &dck->dck_key;
dsl_wrapping_key_t *wkey = dck->dck_wkey;
uint8_t keydata[MASTER_KEY_MAX_LEN];
uint8_t hmac_keydata[SHA512_HMAC_KEYLEN];
uint8_t iv[WRAPPING_IV_LEN];
uint8_t mac[WRAPPING_MAC_LEN];
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3U(key->zk_crypt, <, ZIO_CRYPT_FUNCTIONS);
/* encrypt and store the keys along with the IV and MAC */
VERIFY0(zio_crypt_key_wrap(&dck->dck_wkey->wk_key, key, iv, mac,
keydata, hmac_keydata));
/* update the ZAP with the obtained values */
dsl_crypto_key_sync_impl(tx->tx_pool->dp_meta_objset, dck->dck_obj,
key->zk_crypt, wkey->wk_ddobj, key->zk_guid, iv, mac, keydata,
hmac_keydata, wkey->wk_keyformat, wkey->wk_salt, wkey->wk_iters,
tx);
}
typedef struct spa_keystore_change_key_args {
const char *skcka_dsname;
dsl_crypto_params_t *skcka_cp;
} spa_keystore_change_key_args_t;
static int
spa_keystore_change_key_check(void *arg, dmu_tx_t *tx)
{
int ret;
dsl_dir_t *dd = NULL;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_keystore_change_key_args_t *skcka = arg;
dsl_crypto_params_t *dcp = skcka->skcka_cp;
uint64_t rddobj;
/* check for the encryption feature */
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) {
ret = SET_ERROR(ENOTSUP);
goto error;
}
/* check for valid key change command */
if (dcp->cp_cmd != DCP_CMD_NEW_KEY &&
dcp->cp_cmd != DCP_CMD_INHERIT &&
dcp->cp_cmd != DCP_CMD_FORCE_NEW_KEY &&
dcp->cp_cmd != DCP_CMD_FORCE_INHERIT) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* hold the dd */
ret = dsl_dir_hold(dp, skcka->skcka_dsname, FTAG, &dd, NULL);
if (ret != 0) {
dd = NULL;
goto error;
}
/* verify that the dataset is encrypted */
if (dd->dd_crypto_obj == 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* clones must always use their origin's key */
if (dsl_dir_is_clone(dd)) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* lookup the ddobj we are inheriting the keylocation from */
ret = dsl_dir_get_encryption_root_ddobj(dd, &rddobj);
if (ret != 0)
goto error;
/* Handle inheritance */
if (dcp->cp_cmd == DCP_CMD_INHERIT ||
dcp->cp_cmd == DCP_CMD_FORCE_INHERIT) {
/* no other encryption params should be given */
if (dcp->cp_crypt != ZIO_CRYPT_INHERIT ||
dcp->cp_keylocation != NULL ||
dcp->cp_wkey != NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that this is an encryption root */
if (dd->dd_object != rddobj) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that the parent is encrypted */
if (dd->dd_parent->dd_crypto_obj == 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* if we are rewrapping check that both keys are loaded */
if (dcp->cp_cmd == DCP_CMD_INHERIT) {
ret = dmu_objset_check_wkey_loaded(dd);
if (ret != 0)
goto error;
ret = dmu_objset_check_wkey_loaded(dd->dd_parent);
if (ret != 0)
goto error;
}
dsl_dir_rele(dd, FTAG);
return (0);
}
/* handle forcing an encryption root without rewrapping */
if (dcp->cp_cmd == DCP_CMD_FORCE_NEW_KEY) {
/* no other encryption params should be given */
if (dcp->cp_crypt != ZIO_CRYPT_INHERIT ||
dcp->cp_keylocation != NULL ||
dcp->cp_wkey != NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that this is not an encryption root */
if (dd->dd_object == rddobj) {
ret = SET_ERROR(EINVAL);
goto error;
}
dsl_dir_rele(dd, FTAG);
return (0);
}
/* crypt cannot be changed after creation */
if (dcp->cp_crypt != ZIO_CRYPT_INHERIT) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* we are not inheriting our parent's wkey so we need one ourselves */
if (dcp->cp_wkey == NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check for a valid keyformat for the new wrapping key */
if (dcp->cp_wkey->wk_keyformat >= ZFS_KEYFORMAT_FORMATS ||
dcp->cp_wkey->wk_keyformat == ZFS_KEYFORMAT_NONE) {
ret = SET_ERROR(EINVAL);
goto error;
}
/*
* If this dataset is not currently an encryption root we need a new
* keylocation for this dataset's new wrapping key. Otherwise we can
* just keep the one we already had.
*/
if (dd->dd_object != rddobj && dcp->cp_keylocation == NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* check that the keylocation is valid if it is not NULL */
if (dcp->cp_keylocation != NULL &&
!zfs_prop_valid_keylocation(dcp->cp_keylocation, B_TRUE)) {
ret = SET_ERROR(EINVAL);
goto error;
}
/* passphrases require pbkdf2 salt and iters */
if (dcp->cp_wkey->wk_keyformat == ZFS_KEYFORMAT_PASSPHRASE) {
if (dcp->cp_wkey->wk_salt == 0 ||
dcp->cp_wkey->wk_iters < MIN_PBKDF2_ITERATIONS) {
ret = SET_ERROR(EINVAL);
goto error;
}
} else {
if (dcp->cp_wkey->wk_salt != 0 || dcp->cp_wkey->wk_iters != 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
}
/* make sure the dd's wkey is loaded */
ret = dmu_objset_check_wkey_loaded(dd);
if (ret != 0)
goto error;
dsl_dir_rele(dd, FTAG);
return (0);
error:
if (dd != NULL)
dsl_dir_rele(dd, FTAG);
return (ret);
}
/*
* This function deals with the intricacies of updating wrapping
* key references and encryption roots recursively in the event
* of a call to 'zfs change-key' or 'zfs promote'. The 'skip'
* parameter should always be set to B_FALSE when called
* externally.
*/
static void
spa_keystore_change_key_sync_impl(uint64_t rddobj, uint64_t ddobj,
uint64_t new_rddobj, dsl_wrapping_key_t *wkey, boolean_t skip,
dmu_tx_t *tx)
{
int ret;
zap_cursor_t *zc;
zap_attribute_t *za;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd = NULL;
dsl_crypto_key_t *dck = NULL;
uint64_t curr_rddobj;
ASSERT(RW_WRITE_HELD(&dp->dp_spa->spa_keystore.sk_wkeys_lock));
/* hold the dd */
VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
/* ignore special dsl dirs */
if (dd->dd_myname[0] == '$' || dd->dd_myname[0] == '%') {
dsl_dir_rele(dd, FTAG);
return;
}
ret = dsl_dir_get_encryption_root_ddobj(dd, &curr_rddobj);
VERIFY(ret == 0 || ret == ENOENT);
/*
* Stop recursing if this dsl dir didn't inherit from the root
* or if this dd is a clone.
*/
if (ret == ENOENT ||
(!skip && (curr_rddobj != rddobj || dsl_dir_is_clone(dd)))) {
dsl_dir_rele(dd, FTAG);
return;
}
/*
* If we don't have a wrapping key just update the dck to reflect the
* new encryption root. Otherwise rewrap the entire dck and re-sync it
* to disk. If skip is set, we don't do any of this work.
*/
if (!skip) {
if (wkey == NULL) {
VERIFY0(zap_update(dp->dp_meta_objset,
dd->dd_crypto_obj,
DSL_CRYPTO_KEY_ROOT_DDOBJ, 8, 1,
&new_rddobj, tx));
} else {
VERIFY0(spa_keystore_dsl_key_hold_dd(dp->dp_spa, dd,
FTAG, &dck));
dsl_wrapping_key_hold(wkey, dck);
dsl_wrapping_key_rele(dck->dck_wkey, dck);
dck->dck_wkey = wkey;
dsl_crypto_key_sync(dck, tx);
spa_keystore_dsl_key_rele(dp->dp_spa, dck, FTAG);
}
}
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* Recurse into all child dsl dirs. */
for (zap_cursor_init(zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
spa_keystore_change_key_sync_impl(rddobj,
za->za_first_integer, new_rddobj, wkey, B_FALSE, tx);
}
zap_cursor_fini(zc);
/*
* Recurse into all dsl dirs of clones. We utilize the skip parameter
* here so that we don't attempt to process the clones directly. This
* is because the clone and its origin share the same dck, which has
* already been updated.
*/
for (zap_cursor_init(zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
VERIFY0(dsl_dataset_hold_obj(dp, za->za_first_integer,
FTAG, &clone));
spa_keystore_change_key_sync_impl(rddobj,
clone->ds_dir->dd_object, new_rddobj, wkey, B_TRUE, tx);
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
dsl_dir_rele(dd, FTAG);
}
static void
spa_keystore_change_key_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_t *ds;
avl_index_t where;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
spa_keystore_change_key_args_t *skcka = arg;
dsl_crypto_params_t *dcp = skcka->skcka_cp;
dsl_wrapping_key_t *wkey = NULL, *found_wkey;
dsl_wrapping_key_t wkey_search;
char *keylocation = dcp->cp_keylocation;
uint64_t rddobj, new_rddobj;
/* create and initialize the wrapping key */
VERIFY0(dsl_dataset_hold(dp, skcka->skcka_dsname, FTAG, &ds));
ASSERT(!ds->ds_is_snapshot);
if (dcp->cp_cmd == DCP_CMD_NEW_KEY ||
dcp->cp_cmd == DCP_CMD_FORCE_NEW_KEY) {
/*
* We are changing to a new wkey. Set additional properties
* which can be sent along with this ioctl. Note that this
* command can set keylocation even if it can't normally be
* set via 'zfs set' due to a non-local keylocation.
*/
if (dcp->cp_cmd == DCP_CMD_NEW_KEY) {
wkey = dcp->cp_wkey;
wkey->wk_ddobj = ds->ds_dir->dd_object;
} else {
keylocation = "prompt";
}
if (keylocation != NULL) {
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_LOCAL, 1, strlen(keylocation) + 1,
keylocation, tx);
}
VERIFY0(dsl_dir_get_encryption_root_ddobj(ds->ds_dir, &rddobj));
new_rddobj = ds->ds_dir->dd_object;
} else {
/*
* We are inheriting the parent's wkey. Unset any local
* keylocation and grab a reference to the wkey.
*/
if (dcp->cp_cmd == DCP_CMD_INHERIT) {
VERIFY0(spa_keystore_wkey_hold_dd(spa,
ds->ds_dir->dd_parent, FTAG, &wkey));
}
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), ZPROP_SRC_NONE,
0, 0, NULL, tx);
rddobj = ds->ds_dir->dd_object;
VERIFY0(dsl_dir_get_encryption_root_ddobj(ds->ds_dir->dd_parent,
&new_rddobj));
}
if (wkey == NULL) {
ASSERT(dcp->cp_cmd == DCP_CMD_FORCE_INHERIT ||
dcp->cp_cmd == DCP_CMD_FORCE_NEW_KEY);
}
rw_enter(&spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
/* recurse through all children and rewrap their keys */
spa_keystore_change_key_sync_impl(rddobj, ds->ds_dir->dd_object,
new_rddobj, wkey, B_FALSE, tx);
/*
* All references to the old wkey should be released now (if it
* existed). Replace the wrapping key.
*/
wkey_search.wk_ddobj = ds->ds_dir->dd_object;
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, &wkey_search, NULL);
if (found_wkey != NULL) {
ASSERT0(zfs_refcount_count(&found_wkey->wk_refcnt));
avl_remove(&spa->spa_keystore.sk_wkeys, found_wkey);
dsl_wrapping_key_free(found_wkey);
}
if (dcp->cp_cmd == DCP_CMD_NEW_KEY) {
avl_find(&spa->spa_keystore.sk_wkeys, wkey, &where);
avl_insert(&spa->spa_keystore.sk_wkeys, wkey, where);
} else if (wkey != NULL) {
dsl_wrapping_key_rele(wkey, FTAG);
}
rw_exit(&spa->spa_keystore.sk_wkeys_lock);
dsl_dataset_rele(ds, FTAG);
}
int
spa_keystore_change_key(const char *dsname, dsl_crypto_params_t *dcp)
{
spa_keystore_change_key_args_t skcka;
/* initialize the args struct */
skcka.skcka_dsname = dsname;
skcka.skcka_cp = dcp;
/*
* Perform the actual work in syncing context. The blocks modified
* here could be calculated but it would require holding the pool
* lock and traversing all of the datasets that will have their keys
* changed.
*/
return (dsl_sync_task(dsname, spa_keystore_change_key_check,
spa_keystore_change_key_sync, &skcka, 15,
ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_dir_rename_crypt_check(dsl_dir_t *dd, dsl_dir_t *newparent)
{
int ret;
uint64_t curr_rddobj, parent_rddobj;
if (dd->dd_crypto_obj == 0)
return (0);
ret = dsl_dir_get_encryption_root_ddobj(dd, &curr_rddobj);
if (ret != 0)
goto error;
/*
* if this is not an encryption root, we must make sure we are not
* moving dd to a new encryption root
*/
if (dd->dd_object != curr_rddobj) {
ret = dsl_dir_get_encryption_root_ddobj(newparent,
&parent_rddobj);
if (ret != 0)
goto error;
if (parent_rddobj != curr_rddobj) {
ret = SET_ERROR(EACCES);
goto error;
}
}
return (0);
error:
return (ret);
}
/*
* Check to make sure that a promote from targetdd to origindd will not require
* any key rewraps.
*/
int
dsl_dataset_promote_crypt_check(dsl_dir_t *target, dsl_dir_t *origin)
{
int ret;
uint64_t rddobj, op_rddobj, tp_rddobj;
/* If the dataset is not encrypted we don't need to check anything */
if (origin->dd_crypto_obj == 0)
return (0);
/*
* If we are not changing the first origin snapshot in a chain
* the encryption root won't change either.
*/
if (dsl_dir_is_clone(origin))
return (0);
/*
* If the origin is the encryption root we will update
* the DSL Crypto Key to point to the target instead.
*/
ret = dsl_dir_get_encryption_root_ddobj(origin, &rddobj);
if (ret != 0)
return (ret);
if (rddobj == origin->dd_object)
return (0);
/*
* The origin is inheriting its encryption root from its parent.
* Check that the parent of the target has the same encryption root.
*/
ret = dsl_dir_get_encryption_root_ddobj(origin->dd_parent, &op_rddobj);
if (ret == ENOENT)
return (SET_ERROR(EACCES));
else if (ret != 0)
return (ret);
ret = dsl_dir_get_encryption_root_ddobj(target->dd_parent, &tp_rddobj);
if (ret == ENOENT)
return (SET_ERROR(EACCES));
else if (ret != 0)
return (ret);
if (op_rddobj != tp_rddobj)
return (SET_ERROR(EACCES));
return (0);
}
void
dsl_dataset_promote_crypt_sync(dsl_dir_t *target, dsl_dir_t *origin,
dmu_tx_t *tx)
{
uint64_t rddobj;
dsl_pool_t *dp = target->dd_pool;
dsl_dataset_t *targetds;
dsl_dataset_t *originds;
char *keylocation;
if (origin->dd_crypto_obj == 0)
return;
if (dsl_dir_is_clone(origin))
return;
VERIFY0(dsl_dir_get_encryption_root_ddobj(origin, &rddobj));
if (rddobj != origin->dd_object)
return;
/*
* If the target is being promoted to the encryption root update the
* DSL Crypto Key and keylocation to reflect that. We also need to
* update the DSL Crypto Keys of all children inheriting their
* encryption root to point to the new target. Otherwise, the check
* function ensured that the encryption root will not change.
*/
keylocation = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(target)->dd_head_dataset_obj, FTAG, &targetds));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(origin)->dd_head_dataset_obj, FTAG, &originds));
VERIFY0(dsl_prop_get_dd(origin, zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
1, ZAP_MAXVALUELEN, keylocation, NULL, B_FALSE));
dsl_prop_set_sync_impl(targetds, zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_LOCAL, 1, strlen(keylocation) + 1, keylocation, tx);
dsl_prop_set_sync_impl(originds, zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_NONE, 0, 0, NULL, tx);
rw_enter(&dp->dp_spa->spa_keystore.sk_wkeys_lock, RW_WRITER);
spa_keystore_change_key_sync_impl(rddobj, origin->dd_object,
target->dd_object, NULL, B_FALSE, tx);
rw_exit(&dp->dp_spa->spa_keystore.sk_wkeys_lock);
dsl_dataset_rele(targetds, FTAG);
dsl_dataset_rele(originds, FTAG);
kmem_free(keylocation, ZAP_MAXVALUELEN);
}
int
dmu_objset_create_crypt_check(dsl_dir_t *parentdd, dsl_crypto_params_t *dcp,
boolean_t *will_encrypt)
{
int ret;
uint64_t pcrypt, crypt;
dsl_crypto_params_t dummy_dcp = { 0 };
if (will_encrypt != NULL)
*will_encrypt = B_FALSE;
if (dcp == NULL)
dcp = &dummy_dcp;
if (dcp->cp_cmd != DCP_CMD_NONE)
return (SET_ERROR(EINVAL));
if (parentdd != NULL) {
ret = dsl_dir_get_crypt(parentdd, &pcrypt);
if (ret != 0)
return (ret);
} else {
pcrypt = ZIO_CRYPT_OFF;
}
crypt = (dcp->cp_crypt == ZIO_CRYPT_INHERIT) ? pcrypt : dcp->cp_crypt;
ASSERT3U(pcrypt, !=, ZIO_CRYPT_INHERIT);
ASSERT3U(crypt, !=, ZIO_CRYPT_INHERIT);
/* check for valid dcp with no encryption (inherited or local) */
if (crypt == ZIO_CRYPT_OFF) {
/* Must not specify encryption params */
if (dcp->cp_wkey != NULL ||
(dcp->cp_keylocation != NULL &&
strcmp(dcp->cp_keylocation, "none") != 0))
return (SET_ERROR(EINVAL));
return (0);
}
if (will_encrypt != NULL)
*will_encrypt = B_TRUE;
/*
* We will now definitely be encrypting. Check the feature flag. When
* creating the pool the caller will check this for us since we won't
* technically have the feature activated yet.
*/
if (parentdd != NULL &&
!spa_feature_is_enabled(parentdd->dd_pool->dp_spa,
SPA_FEATURE_ENCRYPTION)) {
return (SET_ERROR(EOPNOTSUPP));
}
/* Check for errata #4 (encryption enabled, bookmark_v2 disabled) */
if (parentdd != NULL &&
!spa_feature_is_enabled(parentdd->dd_pool->dp_spa,
SPA_FEATURE_BOOKMARK_V2)) {
return (SET_ERROR(EOPNOTSUPP));
}
/* handle inheritance */
if (dcp->cp_wkey == NULL) {
ASSERT3P(parentdd, !=, NULL);
/* key must be fully unspecified */
if (dcp->cp_keylocation != NULL)
return (SET_ERROR(EINVAL));
/* parent must have a key to inherit */
if (pcrypt == ZIO_CRYPT_OFF)
return (SET_ERROR(EINVAL));
/* check for parent key */
ret = dmu_objset_check_wkey_loaded(parentdd);
if (ret != 0)
return (ret);
return (0);
}
/* At this point we should have a fully specified key. Check location */
if (dcp->cp_keylocation == NULL ||
!zfs_prop_valid_keylocation(dcp->cp_keylocation, B_TRUE))
return (SET_ERROR(EINVAL));
/* Must have fully specified keyformat */
switch (dcp->cp_wkey->wk_keyformat) {
case ZFS_KEYFORMAT_HEX:
case ZFS_KEYFORMAT_RAW:
/* requires no pbkdf2 iters and salt */
if (dcp->cp_wkey->wk_salt != 0 || dcp->cp_wkey->wk_iters != 0)
return (SET_ERROR(EINVAL));
break;
case ZFS_KEYFORMAT_PASSPHRASE:
/* requires pbkdf2 iters and salt */
if (dcp->cp_wkey->wk_salt == 0 ||
dcp->cp_wkey->wk_iters < MIN_PBKDF2_ITERATIONS)
return (SET_ERROR(EINVAL));
break;
case ZFS_KEYFORMAT_NONE:
default:
/* keyformat must be specified and valid */
return (SET_ERROR(EINVAL));
}
return (0);
}
void
dsl_dataset_create_crypt_sync(uint64_t dsobj, dsl_dir_t *dd,
dsl_dataset_t *origin, dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
uint64_t crypt;
dsl_wrapping_key_t *wkey;
/* clones always use their origin's wrapping key */
if (dsl_dir_is_clone(dd)) {
ASSERT3P(dcp, ==, NULL);
/*
* If this is an encrypted clone we just need to clone the
* dck into dd. Zapify the dd so we can do that.
*/
if (origin->ds_dir->dd_crypto_obj != 0) {
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_zapify(dd, tx);
dd->dd_crypto_obj =
dsl_crypto_key_clone_sync(origin->ds_dir, tx);
VERIFY0(zap_add(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_CRYPTO_KEY_OBJ, sizeof (uint64_t), 1,
&dd->dd_crypto_obj, tx));
}
return;
}
/*
* A NULL dcp at this point indicates this is the origin dataset
* which does not have an objset to encrypt. Raw receives will handle
* encryption separately later. In both cases we can simply return.
*/
if (dcp == NULL || dcp->cp_cmd == DCP_CMD_RAW_RECV)
return;
crypt = dcp->cp_crypt;
wkey = dcp->cp_wkey;
/* figure out the effective crypt */
if (crypt == ZIO_CRYPT_INHERIT && dd->dd_parent != NULL)
VERIFY0(dsl_dir_get_crypt(dd->dd_parent, &crypt));
/* if we aren't doing encryption just return */
if (crypt == ZIO_CRYPT_OFF || crypt == ZIO_CRYPT_INHERIT)
return;
/* zapify the dd so that we can add the crypto key obj to it */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_zapify(dd, tx);
/* use the new key if given or inherit from the parent */
if (wkey == NULL) {
VERIFY0(spa_keystore_wkey_hold_dd(dp->dp_spa,
dd->dd_parent, FTAG, &wkey));
} else {
wkey->wk_ddobj = dd->dd_object;
}
ASSERT3P(wkey, !=, NULL);
/* Create or clone the DSL crypto key and activate the feature */
dd->dd_crypto_obj = dsl_crypto_key_create_sync(crypt, wkey, tx);
VERIFY0(zap_add(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_CRYPTO_KEY_OBJ, sizeof (uint64_t), 1, &dd->dd_crypto_obj,
tx));
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_ENCRYPTION,
(void *)B_TRUE, tx);
/*
* If we inherited the wrapping key we release our reference now.
* Otherwise, this is a new key and we need to load it into the
* keystore.
*/
if (dcp->cp_wkey == NULL) {
dsl_wrapping_key_rele(wkey, FTAG);
} else {
VERIFY0(spa_keystore_load_wkey_impl(dp->dp_spa, wkey));
}
}
typedef struct dsl_crypto_recv_key_arg {
uint64_t dcrka_dsobj;
uint64_t dcrka_fromobj;
dmu_objset_type_t dcrka_ostype;
nvlist_t *dcrka_nvl;
boolean_t dcrka_do_key;
} dsl_crypto_recv_key_arg_t;
static int
dsl_crypto_recv_raw_objset_check(dsl_dataset_t *ds, dsl_dataset_t *fromds,
dmu_objset_type_t ostype, nvlist_t *nvl, dmu_tx_t *tx)
{
int ret;
objset_t *os;
dnode_t *mdn;
uint8_t *buf = NULL;
uint_t len;
uint64_t intval, nlevels, blksz, ibs;
uint64_t nblkptr, maxblkid;
if (ostype != DMU_OST_ZFS && ostype != DMU_OST_ZVOL)
return (SET_ERROR(EINVAL));
/* raw receives also need info about the structure of the metadnode */
ret = nvlist_lookup_uint64(nvl, "mdn_compress", &intval);
if (ret != 0 || intval >= ZIO_COMPRESS_LEGACY_FUNCTIONS)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, "mdn_checksum", &intval);
if (ret != 0 || intval >= ZIO_CHECKSUM_LEGACY_FUNCTIONS)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, "mdn_nlevels", &nlevels);
if (ret != 0 || nlevels > DN_MAX_LEVELS)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, "mdn_blksz", &blksz);
if (ret != 0 || blksz < SPA_MINBLOCKSIZE)
return (SET_ERROR(EINVAL));
else if (blksz > spa_maxblocksize(tx->tx_pool->dp_spa))
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, "mdn_indblkshift", &ibs);
if (ret != 0 || ibs < DN_MIN_INDBLKSHIFT || ibs > DN_MAX_INDBLKSHIFT)
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, "mdn_nblkptr", &nblkptr);
if (ret != 0 || nblkptr != DN_MAX_NBLKPTR)
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, "mdn_maxblkid", &maxblkid);
if (ret != 0)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, "portable_mac", &buf, &len);
if (ret != 0 || len != ZIO_OBJSET_MAC_LEN)
return (SET_ERROR(EINVAL));
ret = dmu_objset_from_ds(ds, &os);
if (ret != 0)
return (ret);
- /*
- * Useraccounting is not portable and must be done with the keys loaded.
- * Therefore, whenever we do any kind of receive the useraccounting
- * must not be present.
- */
- ASSERT0(os->os_flags & OBJSET_FLAG_USERACCOUNTING_COMPLETE);
- ASSERT0(os->os_flags & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
-
mdn = DMU_META_DNODE(os);
/*
* If we already created the objset, make sure its unchangeable
* properties match the ones received in the nvlist.
*/
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
if (!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) &&
(mdn->dn_nlevels != nlevels || mdn->dn_datablksz != blksz ||
mdn->dn_indblkshift != ibs || mdn->dn_nblkptr != nblkptr)) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
return (SET_ERROR(EINVAL));
}
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/*
* Check that the ivset guid of the fromds matches the one from the
* send stream. Older versions of the encryption code did not have
* an ivset guid on the from dataset and did not send one in the
* stream. For these streams we provide the
* zfs_disable_ivset_guid_check tunable to allow these datasets to
* be received with a generated ivset guid.
*/
if (fromds != NULL && !zfs_disable_ivset_guid_check) {
uint64_t from_ivset_guid = 0;
intval = 0;
(void) nvlist_lookup_uint64(nvl, "from_ivset_guid", &intval);
(void) zap_lookup(tx->tx_pool->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID,
sizeof (from_ivset_guid), 1, &from_ivset_guid);
if (intval == 0 || from_ivset_guid == 0)
return (SET_ERROR(ZFS_ERR_FROM_IVSET_GUID_MISSING));
if (intval != from_ivset_guid)
return (SET_ERROR(ZFS_ERR_FROM_IVSET_GUID_MISMATCH));
}
return (0);
}
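/*
 * Illustrative sketch (not part of this patch): the ivset-guid check above
 * has three outcomes, which the standalone helper below restates in plain
 * C. The names ivset_check_result, IVSET_MISSING and IVSET_MISMATCH are
 * hypothetical and exist only for this example.
 */
#include <stdint.h>

enum ivset_check_result { IVSET_OK, IVSET_MISSING, IVSET_MISMATCH };

static enum ivset_check_result
ivset_guid_check(uint64_t stream_guid, uint64_t ondisk_guid)
{
	/* a zero guid on either side means the stream or dataset predates ivset guids */
	if (stream_guid == 0 || ondisk_guid == 0)
		return (IVSET_MISSING);
	/* both guids present: they must identify the same IV set */
	if (stream_guid != ondisk_guid)
		return (IVSET_MISMATCH);
	return (IVSET_OK);
}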
static void
dsl_crypto_recv_raw_objset_sync(dsl_dataset_t *ds, dmu_objset_type_t ostype,
nvlist_t *nvl, dmu_tx_t *tx)
{
dsl_pool_t *dp = tx->tx_pool;
objset_t *os;
dnode_t *mdn;
zio_t *zio;
uint8_t *portable_mac;
uint_t len;
uint64_t compress, checksum, nlevels, blksz, ibs, maxblkid;
boolean_t newds = B_FALSE;
VERIFY0(dmu_objset_from_ds(ds, &os));
mdn = DMU_META_DNODE(os);
/*
* Fetch the values we need from the nvlist. "to_ivset_guid" must
* be set on the snapshot, which doesn't exist yet. The receive
* code will take care of this for us later.
*/
compress = fnvlist_lookup_uint64(nvl, "mdn_compress");
checksum = fnvlist_lookup_uint64(nvl, "mdn_checksum");
nlevels = fnvlist_lookup_uint64(nvl, "mdn_nlevels");
blksz = fnvlist_lookup_uint64(nvl, "mdn_blksz");
ibs = fnvlist_lookup_uint64(nvl, "mdn_indblkshift");
maxblkid = fnvlist_lookup_uint64(nvl, "mdn_maxblkid");
VERIFY0(nvlist_lookup_uint8_array(nvl, "portable_mac", &portable_mac,
&len));
/* if we haven't created an objset for the ds yet, do that now */
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
(void) dmu_objset_create_impl_dnstats(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), ostype, nlevels, blksz,
ibs, tx);
newds = B_TRUE;
}
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/*
* Set the portable MAC. The local MAC will always be zero since the
* incoming data will all be portable and user accounting will be
* deferred until the next mount. Afterwards, flag the os to be
* written out raw next time.
*/
arc_release(os->os_phys_buf, &os->os_phys_buf);
bcopy(portable_mac, os->os_phys->os_portable_mac, ZIO_OBJSET_MAC_LEN);
bzero(os->os_phys->os_local_mac, ZIO_OBJSET_MAC_LEN);
+ os->os_flags &= ~OBJSET_FLAG_USERACCOUNTING_COMPLETE;
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
/* set metadnode compression and checksum */
mdn->dn_compress = compress;
mdn->dn_checksum = checksum;
rw_enter(&mdn->dn_struct_rwlock, RW_WRITER);
dnode_new_blkid(mdn, maxblkid, tx, B_FALSE, B_TRUE);
rw_exit(&mdn->dn_struct_rwlock);
/*
* We can't normally dirty the dataset in syncing context unless
* we are creating a new dataset. In this case, we perform a
* pseudo txg sync here instead.
*/
if (newds) {
dsl_dataset_dirty(ds, tx);
} else {
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
/* dsl_dataset_sync_done will drop this reference. */
dmu_buf_add_ref(ds->ds_dbuf, ds);
dsl_dataset_sync_done(ds, tx);
}
}
int
dsl_crypto_recv_raw_key_check(dsl_dataset_t *ds, nvlist_t *nvl, dmu_tx_t *tx)
{
int ret;
objset_t *mos = tx->tx_pool->dp_meta_objset;
uint8_t *buf = NULL;
uint_t len;
uint64_t intval, key_guid, version;
boolean_t is_passphrase = B_FALSE;
ASSERT(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT);
/*
* Read and check all the encryption values from the nvlist. We need
* all of the fields of a DSL Crypto Key, as well as a fully specified
* wrapping key.
*/
ret = nvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE, &intval);
if (ret != 0 || intval >= ZIO_CRYPT_FUNCTIONS ||
intval <= ZIO_CRYPT_OFF)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_GUID, &intval);
if (ret != 0)
return (SET_ERROR(EINVAL));
/*
* If this is an incremental receive, make sure the given key guid
* matches the one we already have.
*/
if (ds->ds_dir->dd_crypto_obj != 0) {
ret = zap_lookup(mos, ds->ds_dir->dd_crypto_obj,
DSL_CRYPTO_KEY_GUID, 8, 1, &key_guid);
if (ret != 0)
return (ret);
if (intval != key_guid)
return (SET_ERROR(EACCES));
}
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
&buf, &len);
if (ret != 0 || len != MASTER_KEY_MAX_LEN)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_HMAC_KEY,
&buf, &len);
if (ret != 0 || len != SHA512_HMAC_KEYLEN)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_IV, &buf, &len);
if (ret != 0 || len != WRAPPING_IV_LEN)
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MAC, &buf, &len);
if (ret != 0 || len != WRAPPING_MAC_LEN)
return (SET_ERROR(EINVAL));
/*
* We don't support receiving old on-disk formats. The version 0
* implementation protected several fields in an objset that were
* not always portable during a raw receive. As a result, we call
* the old version an on-disk errata #3.
*/
ret = nvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_VERSION, &version);
if (ret != 0 || version != ZIO_CRYPT_KEY_CURRENT_VERSION)
return (SET_ERROR(ENOTSUP));
ret = nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_KEYFORMAT),
&intval);
if (ret != 0 || intval >= ZFS_KEYFORMAT_FORMATS ||
intval == ZFS_KEYFORMAT_NONE)
return (SET_ERROR(EINVAL));
is_passphrase = (intval == ZFS_KEYFORMAT_PASSPHRASE);
/*
* For raw receives we allow any value for pbkdf2iters, since the user
* won't have a chance to change it.
*/
ret = nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS),
&intval);
if (ret != 0 || (is_passphrase == (intval == 0)))
return (SET_ERROR(EINVAL));
ret = nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT),
&intval);
if (ret != 0 || (is_passphrase == (intval == 0)))
return (SET_ERROR(EINVAL));
return (0);
}
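/*
 * Illustrative sketch (not part of this patch): the pbkdf2 checks above use
 * (is_passphrase == (intval == 0)) to reject two cases at once: a
 * passphrase key with a zero iteration count or salt, and a non-passphrase
 * key that carries one anyway. pbkdf2_field_ok is a hypothetical helper
 * that spells the same rule out.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
pbkdf2_field_ok(bool is_passphrase, uint64_t value)
{
	/* passphrase keys need a nonzero value; raw/hex keys must use zero */
	return (is_passphrase ? (value != 0) : (value == 0));
}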
void
dsl_crypto_recv_raw_key_sync(dsl_dataset_t *ds, nvlist_t *nvl, dmu_tx_t *tx)
{
dsl_pool_t *dp = tx->tx_pool;
objset_t *mos = dp->dp_meta_objset;
dsl_dir_t *dd = ds->ds_dir;
uint_t len;
uint64_t rddobj, one = 1;
uint8_t *keydata, *hmac_keydata, *iv, *mac;
uint64_t crypt, key_guid, keyformat, iters, salt;
uint64_t version = ZIO_CRYPT_KEY_CURRENT_VERSION;
char *keylocation = "prompt";
/* lookup the values we need to create the DSL Crypto Key */
crypt = fnvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE);
key_guid = fnvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_GUID);
keyformat = fnvlist_lookup_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT));
iters = fnvlist_lookup_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS));
salt = fnvlist_lookup_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
&keydata, &len));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_HMAC_KEY,
&hmac_keydata, &len));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_IV, &iv, &len));
VERIFY0(nvlist_lookup_uint8_array(nvl, DSL_CRYPTO_KEY_MAC, &mac, &len));
/* if this is a new dataset, set up the DSL Crypto Key. */
if (dd->dd_crypto_obj == 0) {
/* zapify the dsl dir so we can add the key object to it */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_zapify(dd, tx);
/* create the DSL Crypto Key on disk and activate the feature */
dd->dd_crypto_obj = zap_create(mos,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
dd->dd_crypto_obj, DSL_CRYPTO_KEY_REFCOUNT,
sizeof (uint64_t), 1, &one, tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
dd->dd_crypto_obj, DSL_CRYPTO_KEY_VERSION,
sizeof (uint64_t), 1, &version, tx));
dsl_dataset_activate_feature(ds->ds_object,
SPA_FEATURE_ENCRYPTION, (void *)B_TRUE, tx);
ds->ds_feature[SPA_FEATURE_ENCRYPTION] = (void *)B_TRUE;
/* save the dd_crypto_obj on disk */
VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_CRYPTO_KEY_OBJ,
sizeof (uint64_t), 1, &dd->dd_crypto_obj, tx));
/*
* Set the keylocation to prompt by default. If keylocation
* has been provided via the properties, this will be overridden
* later.
*/
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
ZPROP_SRC_LOCAL, 1, strlen(keylocation) + 1,
keylocation, tx);
rddobj = dd->dd_object;
} else {
VERIFY0(dsl_dir_get_encryption_root_ddobj(dd, &rddobj));
}
/* sync the key data to the ZAP object on disk */
dsl_crypto_key_sync_impl(mos, dd->dd_crypto_obj, crypt,
rddobj, key_guid, iv, mac, keydata, hmac_keydata, keyformat, salt,
iters, tx);
}
static int
dsl_crypto_recv_key_check(void *arg, dmu_tx_t *tx)
{
int ret;
dsl_crypto_recv_key_arg_t *dcrka = arg;
dsl_dataset_t *ds = NULL, *fromds = NULL;
ret = dsl_dataset_hold_obj(tx->tx_pool, dcrka->dcrka_dsobj,
FTAG, &ds);
if (ret != 0)
goto out;
if (dcrka->dcrka_fromobj != 0) {
ret = dsl_dataset_hold_obj(tx->tx_pool, dcrka->dcrka_fromobj,
FTAG, &fromds);
if (ret != 0)
goto out;
}
ret = dsl_crypto_recv_raw_objset_check(ds, fromds,
dcrka->dcrka_ostype, dcrka->dcrka_nvl, tx);
if (ret != 0)
goto out;
/*
* We run this check even if we won't be doing this part of
* the receive now so that we don't make the user wait until
* the receive finishes to fail.
*/
ret = dsl_crypto_recv_raw_key_check(ds, dcrka->dcrka_nvl, tx);
if (ret != 0)
goto out;
out:
if (ds != NULL)
dsl_dataset_rele(ds, FTAG);
if (fromds != NULL)
dsl_dataset_rele(fromds, FTAG);
return (ret);
}
static void
dsl_crypto_recv_key_sync(void *arg, dmu_tx_t *tx)
{
dsl_crypto_recv_key_arg_t *dcrka = arg;
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(tx->tx_pool, dcrka->dcrka_dsobj,
FTAG, &ds));
dsl_crypto_recv_raw_objset_sync(ds, dcrka->dcrka_ostype,
dcrka->dcrka_nvl, tx);
if (dcrka->dcrka_do_key)
dsl_crypto_recv_raw_key_sync(ds, dcrka->dcrka_nvl, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* This function is used to sync an nvlist representing a DSL Crypto Key and
* the associated encryption parameters. The key will be written exactly as is
* without wrapping it.
*/
int
dsl_crypto_recv_raw(const char *poolname, uint64_t dsobj, uint64_t fromobj,
dmu_objset_type_t ostype, nvlist_t *nvl, boolean_t do_key)
{
dsl_crypto_recv_key_arg_t dcrka;
dcrka.dcrka_dsobj = dsobj;
dcrka.dcrka_fromobj = fromobj;
dcrka.dcrka_ostype = ostype;
dcrka.dcrka_nvl = nvl;
dcrka.dcrka_do_key = do_key;
return (dsl_sync_task(poolname, dsl_crypto_recv_key_check,
dsl_crypto_recv_key_sync, &dcrka, 1, ZFS_SPACE_CHECK_NORMAL));
}
int
dsl_crypto_populate_key_nvlist(objset_t *os, uint64_t from_ivset_guid,
nvlist_t **nvl_out)
{
int ret;
dsl_dataset_t *ds = os->os_dsl_dataset;
dnode_t *mdn;
uint64_t rddobj;
nvlist_t *nvl = NULL;
uint64_t dckobj = ds->ds_dir->dd_crypto_obj;
dsl_dir_t *rdd = NULL;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
uint64_t crypt = 0, key_guid = 0, format = 0;
uint64_t iters = 0, salt = 0, version = 0;
uint64_t to_ivset_guid = 0;
uint8_t raw_keydata[MASTER_KEY_MAX_LEN];
uint8_t raw_hmac_keydata[SHA512_HMAC_KEYLEN];
uint8_t iv[WRAPPING_IV_LEN];
uint8_t mac[WRAPPING_MAC_LEN];
ASSERT(dckobj != 0);
mdn = DMU_META_DNODE(os);
nvl = fnvlist_alloc();
/* lookup values from the DSL Crypto Key */
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_CRYPTO_SUITE, 8, 1,
&crypt);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_GUID, 8, 1, &key_guid);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MASTER_KEY, 1,
MASTER_KEY_MAX_LEN, raw_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_HMAC_KEY, 1,
SHA512_HMAC_KEYLEN, raw_hmac_keydata);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_IV, 1, WRAPPING_IV_LEN,
iv);
if (ret != 0)
goto error;
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_MAC, 1, WRAPPING_MAC_LEN,
mac);
if (ret != 0)
goto error;
/* see zfs_disable_ivset_guid_check tunable for errata info */
ret = zap_lookup(mos, ds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&to_ivset_guid);
if (ret != 0)
ASSERT3U(dp->dp_spa->spa_errata, !=, 0);
/*
* We don't support raw sends of legacy on-disk formats. See the
* comment in dsl_crypto_recv_key_check() for details.
*/
ret = zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_VERSION, 8, 1, &version);
if (ret != 0 || version != ZIO_CRYPT_KEY_CURRENT_VERSION) {
dp->dp_spa->spa_errata = ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
ret = SET_ERROR(ENOTSUP);
goto error;
}
/*
* Lookup wrapping key properties. An early version of the code did
* not correctly add these values to the wrapping key or the DSL
* Crypto Key on disk for non-encryption roots, so to be safe we
* always take the slightly circuitous route of looking it up from
* the encryption root's key.
*/
ret = dsl_dir_get_encryption_root_ddobj(ds->ds_dir, &rddobj);
if (ret != 0)
goto error;
dsl_pool_config_enter(dp, FTAG);
ret = dsl_dir_hold_obj(dp, rddobj, NULL, FTAG, &rdd);
if (ret != 0)
goto error_unlock;
ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &format);
if (ret != 0)
goto error_unlock;
if (format == ZFS_KEYFORMAT_PASSPHRASE) {
ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &iters);
if (ret != 0)
goto error_unlock;
ret = zap_lookup(dp->dp_meta_objset, rdd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &salt);
if (ret != 0)
goto error_unlock;
}
dsl_dir_rele(rdd, FTAG);
dsl_pool_config_exit(dp, FTAG);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE, crypt);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_GUID, key_guid);
fnvlist_add_uint64(nvl, DSL_CRYPTO_KEY_VERSION, version);
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_MASTER_KEY,
raw_keydata, MASTER_KEY_MAX_LEN));
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_HMAC_KEY,
raw_hmac_keydata, SHA512_HMAC_KEYLEN));
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_IV, iv,
WRAPPING_IV_LEN));
VERIFY0(nvlist_add_uint8_array(nvl, DSL_CRYPTO_KEY_MAC, mac,
WRAPPING_MAC_LEN));
VERIFY0(nvlist_add_uint8_array(nvl, "portable_mac",
os->os_phys->os_portable_mac, ZIO_OBJSET_MAC_LEN));
fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_KEYFORMAT), format);
fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), iters);
fnvlist_add_uint64(nvl, zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), salt);
fnvlist_add_uint64(nvl, "mdn_checksum", mdn->dn_checksum);
fnvlist_add_uint64(nvl, "mdn_compress", mdn->dn_compress);
fnvlist_add_uint64(nvl, "mdn_nlevels", mdn->dn_nlevels);
fnvlist_add_uint64(nvl, "mdn_blksz", mdn->dn_datablksz);
fnvlist_add_uint64(nvl, "mdn_indblkshift", mdn->dn_indblkshift);
fnvlist_add_uint64(nvl, "mdn_nblkptr", mdn->dn_nblkptr);
fnvlist_add_uint64(nvl, "mdn_maxblkid", mdn->dn_maxblkid);
fnvlist_add_uint64(nvl, "to_ivset_guid", to_ivset_guid);
fnvlist_add_uint64(nvl, "from_ivset_guid", from_ivset_guid);
*nvl_out = nvl;
return (0);
error_unlock:
dsl_pool_config_exit(dp, FTAG);
error:
if (rdd != NULL)
dsl_dir_rele(rdd, FTAG);
nvlist_free(nvl);
*nvl_out = NULL;
return (ret);
}
uint64_t
dsl_crypto_key_create_sync(uint64_t crypt, dsl_wrapping_key_t *wkey,
dmu_tx_t *tx)
{
dsl_crypto_key_t dck;
uint64_t version = ZIO_CRYPT_KEY_CURRENT_VERSION;
uint64_t one = 1ULL;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
ASSERT3U(crypt, >, ZIO_CRYPT_OFF);
/* create the DSL Crypto Key ZAP object */
dck.dck_obj = zap_create(tx->tx_pool->dp_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
/* fill in the key (on the stack) and sync it to disk */
dck.dck_wkey = wkey;
VERIFY0(zio_crypt_key_init(crypt, &dck.dck_key));
dsl_crypto_key_sync(&dck, tx);
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset, dck.dck_obj,
DSL_CRYPTO_KEY_REFCOUNT, sizeof (uint64_t), 1, &one, tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset, dck.dck_obj,
DSL_CRYPTO_KEY_VERSION, sizeof (uint64_t), 1, &version, tx));
zio_crypt_key_destroy(&dck.dck_key);
bzero(&dck.dck_key, sizeof (zio_crypt_key_t));
return (dck.dck_obj);
}
uint64_t
dsl_crypto_key_clone_sync(dsl_dir_t *origindd, dmu_tx_t *tx)
{
objset_t *mos = tx->tx_pool->dp_meta_objset;
ASSERT(dmu_tx_is_syncing(tx));
VERIFY0(zap_increment(mos, origindd->dd_crypto_obj,
DSL_CRYPTO_KEY_REFCOUNT, 1, tx));
return (origindd->dd_crypto_obj);
}
void
dsl_crypto_key_destroy_sync(uint64_t dckobj, dmu_tx_t *tx)
{
objset_t *mos = tx->tx_pool->dp_meta_objset;
uint64_t refcnt;
/* Decrement the refcount, destroy if this is the last reference */
VERIFY0(zap_lookup(mos, dckobj, DSL_CRYPTO_KEY_REFCOUNT,
sizeof (uint64_t), 1, &refcnt));
if (refcnt != 1) {
VERIFY0(zap_increment(mos, dckobj, DSL_CRYPTO_KEY_REFCOUNT,
-1, tx));
} else {
VERIFY0(zap_destroy(mos, dckobj, tx));
}
}
void
dsl_dataset_crypt_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
uint64_t intval;
dsl_dir_t *dd = ds->ds_dir;
dsl_dir_t *enc_root;
char buf[ZFS_MAX_DATASET_NAME_LEN];
if (dd->dd_crypto_obj == 0)
return;
intval = dsl_dataset_get_keystatus(dd);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_KEYSTATUS, intval);
if (dsl_dir_get_crypt(dd, &intval) == 0)
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_ENCRYPTION, intval);
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
DSL_CRYPTO_KEY_GUID, 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_KEY_GUID, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_KEYFORMAT, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_PBKDF2_SALT, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, dd->dd_crypto_obj,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_PBKDF2_ITERS, intval);
}
if (zap_lookup(dd->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1, &intval) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_IVSET_GUID, intval);
}
if (dsl_dir_get_encryption_root_ddobj(dd, &intval) == 0) {
if (dsl_dir_hold_obj(dd->dd_pool, intval, NULL, FTAG,
&enc_root) == 0) {
dsl_dir_name(enc_root, buf);
dsl_dir_rele(enc_root, FTAG);
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_ENCRYPTION_ROOT, buf);
}
}
}
int
spa_crypt_get_salt(spa_t *spa, uint64_t dsobj, uint8_t *salt)
{
int ret;
dsl_crypto_key_t *dck = NULL;
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_crypt_key_get_salt(&dck->dck_key, salt);
if (ret != 0)
goto error;
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (ret);
}
/*
* Objset blocks are a special case for MAC generation. These blocks have 2
* 256-bit MACs which are embedded within the block itself, rather than a
* single 128 bit MAC. As a result, this function handles encoding and decoding
* the MACs on its own, unlike other functions in this file.
*/
int
spa_do_crypt_objset_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj,
abd_t *abd, uint_t datalen, boolean_t byteswap)
{
int ret;
dsl_crypto_key_t *dck = NULL;
void *buf = abd_borrow_buf_copy(abd, datalen);
objset_phys_t *osp = buf;
uint8_t portable_mac[ZIO_OBJSET_MAC_LEN];
uint8_t local_mac[ZIO_OBJSET_MAC_LEN];
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0)
goto error;
/* calculate both HMACs */
ret = zio_crypt_do_objset_hmacs(&dck->dck_key, buf, datalen,
byteswap, portable_mac, local_mac);
if (ret != 0)
goto error;
spa_keystore_dsl_key_rele(spa, dck, FTAG);
/* if we are generating, encode the HMACs in the objset_phys_t */
if (generate) {
bcopy(portable_mac, osp->os_portable_mac, ZIO_OBJSET_MAC_LEN);
bcopy(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN);
abd_return_buf_copy(abd, buf, datalen);
return (0);
}
if (bcmp(portable_mac, osp->os_portable_mac, ZIO_OBJSET_MAC_LEN) != 0 ||
bcmp(local_mac, osp->os_local_mac, ZIO_OBJSET_MAC_LEN) != 0) {
abd_return_buf(abd, buf, datalen);
return (SET_ERROR(ECKSUM));
}
abd_return_buf(abd, buf, datalen);
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
abd_return_buf(abd, buf, datalen);
return (ret);
}
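/*
 * Illustrative sketch (not part of this patch): as the comment above
 * spa_do_crypt_objset_mac_abd() explains, objset blocks carry two embedded
 * 256-bit MACs rather than a single 128-bit MAC, and verification succeeds
 * only if both match. objset_macs_match is a hypothetical standalone
 * restatement of that check using memcmp.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool
objset_macs_match(const unsigned char *want_portable,
    const unsigned char *want_local, const unsigned char *got_portable,
    const unsigned char *got_local, size_t maclen)
{
	/* both the portable MAC and the local MAC must verify */
	return (memcmp(want_portable, got_portable, maclen) == 0 &&
	    memcmp(want_local, got_local, maclen) == 0);
}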
int
spa_do_crypt_mac_abd(boolean_t generate, spa_t *spa, uint64_t dsobj, abd_t *abd,
uint_t datalen, uint8_t *mac)
{
int ret;
dsl_crypto_key_t *dck = NULL;
uint8_t *buf = abd_borrow_buf_copy(abd, datalen);
uint8_t digestbuf[ZIO_DATA_MAC_LEN];
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0)
goto error;
/* perform the hmac */
ret = zio_crypt_do_hmac(&dck->dck_key, buf, datalen,
digestbuf, ZIO_DATA_MAC_LEN);
if (ret != 0)
goto error;
abd_return_buf(abd, buf, datalen);
spa_keystore_dsl_key_rele(spa, dck, FTAG);
/*
* Truncate and fill in mac buffer if we were asked to generate a MAC.
* Otherwise verify that the MAC matched what we expected.
*/
if (generate) {
bcopy(digestbuf, mac, ZIO_DATA_MAC_LEN);
return (0);
}
if (bcmp(digestbuf, mac, ZIO_DATA_MAC_LEN) != 0)
return (SET_ERROR(ECKSUM));
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
abd_return_buf(abd, buf, datalen);
return (ret);
}
/*
* This function serves as a multiplexer for encryption and decryption of
* all blocks (except the L2ARC). For encryption, it will populate the IV,
* salt, MAC, and cabd (the ciphertext). On decryption it will simply use
* these fields to populate pabd (the plaintext).
*/
int
spa_do_crypt_abd(boolean_t encrypt, spa_t *spa, const zbookmark_phys_t *zb,
dmu_object_type_t ot, boolean_t dedup, boolean_t bswap, uint8_t *salt,
uint8_t *iv, uint8_t *mac, uint_t datalen, abd_t *pabd, abd_t *cabd,
boolean_t *no_crypt)
{
int ret;
dsl_crypto_key_t *dck = NULL;
uint8_t *plainbuf = NULL, *cipherbuf = NULL;
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
/* look up the key from the spa's keystore */
ret = spa_keystore_lookup_key(spa, zb->zb_objset, FTAG, &dck);
if (ret != 0) {
ret = SET_ERROR(EACCES);
return (ret);
}
if (encrypt) {
plainbuf = abd_borrow_buf_copy(pabd, datalen);
cipherbuf = abd_borrow_buf(cabd, datalen);
} else {
plainbuf = abd_borrow_buf(pabd, datalen);
cipherbuf = abd_borrow_buf_copy(cabd, datalen);
}
/*
* Both encryption and decryption functions need a salt for key
* generation and an IV. When encrypting a non-dedup block, we
* generate the salt and IV randomly to be stored by the caller. Dedup
* blocks perform a (more expensive) HMAC of the plaintext to obtain
* the salt and the IV. ZIL blocks have their salt and IV generated
* at allocation time in zio_alloc_zil(). On decryption, we simply use
* the provided values.
*/
if (encrypt && ot != DMU_OT_INTENT_LOG && !dedup) {
ret = zio_crypt_key_get_salt(&dck->dck_key, salt);
if (ret != 0)
goto error;
ret = zio_crypt_generate_iv(iv);
if (ret != 0)
goto error;
} else if (encrypt && dedup) {
ret = zio_crypt_generate_iv_salt_dedup(&dck->dck_key,
plainbuf, datalen, iv, salt);
if (ret != 0)
goto error;
}
/* call lower level function to perform encryption / decryption */
ret = zio_do_crypt_data(encrypt, &dck->dck_key, ot, bswap, salt, iv,
mac, datalen, plainbuf, cipherbuf, no_crypt);
/*
* Handle injected decryption faults. Unfortunately, we cannot inject
* faults for dnode blocks because we might trigger the panic in
* dbuf_prepare_encrypted_dnode_leaf(), which exists because syncing
* context is not prepared to handle malicious decryption failures.
*/
if (zio_injection_enabled && !encrypt && ot != DMU_OT_DNODE && ret == 0)
ret = zio_handle_decrypt_injection(spa, zb, ot, ECKSUM);
if (ret != 0)
goto error;
if (encrypt) {
abd_return_buf(pabd, plainbuf, datalen);
abd_return_buf_copy(cabd, cipherbuf, datalen);
} else {
abd_return_buf_copy(pabd, plainbuf, datalen);
abd_return_buf(cabd, cipherbuf, datalen);
}
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (0);
error:
if (encrypt) {
/* zero out any state we might have changed while encrypting */
bzero(salt, ZIO_DATA_SALT_LEN);
bzero(iv, ZIO_DATA_IV_LEN);
bzero(mac, ZIO_DATA_MAC_LEN);
abd_return_buf(pabd, plainbuf, datalen);
abd_return_buf_copy(cabd, cipherbuf, datalen);
} else {
abd_return_buf_copy(pabd, plainbuf, datalen);
abd_return_buf(cabd, cipherbuf, datalen);
}
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (ret);
}
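/*
 * Illustrative sketch (not part of this patch): the block comment inside
 * spa_do_crypt_abd() describes four possible sources for the salt/IV pair.
 * The enum and helper below restate that decision table; iv_source and
 * choose_iv_source are hypothetical names used only for this example.
 */
#include <stdbool.h>

enum iv_source {
	IV_FROM_CALLER,		/* decryption: use the stored salt/IV/MAC */
	IV_PREALLOCATED,	/* ZIL blocks: generated in zio_alloc_zil() */
	IV_FROM_HMAC,		/* dedup blocks: derived from the plaintext */
	IV_RANDOM		/* ordinary encrypted writes */
};

static enum iv_source
choose_iv_source(bool encrypt, bool is_zil_block, bool dedup)
{
	if (!encrypt)
		return (IV_FROM_CALLER);
	if (is_zil_block)
		return (IV_PREALLOCATED);
	if (dedup)
		return (IV_FROM_HMAC);
	return (IV_RANDOM);
}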
ZFS_MODULE_PARAM(zfs, zfs_, disable_ivset_guid_check, INT, ZMOD_RW,
"Set to allow raw receives without IVset guids");
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dataset.c b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
index f99964511aa6..e8692c95438a 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dataset.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dataset.c
@@ -1,5011 +1,5007 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 RackTop Systems.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020 The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/policy.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>
/*
* The SPA supports block sizes up to 16MB. However, very large blocks
* can have an impact on i/o latency (e.g. tying up a spinning disk for
* ~300ms), and also potentially on the memory allocator. Therefore,
* we do not allow the recordsize to be set larger than zfs_max_recordsize
* (default 1MB). Larger blocks can be created by changing this tunable,
* and pools with larger blocks can always be imported and used, regardless
* of this setting.
*/
int zfs_max_recordsize = 1 * 1024 * 1024;
int zfs_allow_redacted_dataset_mount = 0;
#define SWITCH64(x, y) \
{ \
uint64_t __tmp = (x); \
(x) = (y); \
(y) = __tmp; \
}
#define DS_REF_MAX (1ULL << 62)
-extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);
-
static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
uint64_t obj, dmu_tx_t *tx);
static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
dmu_tx_t *tx);
static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f);
extern int spa_asize_inflation;
static zil_header_t zero_zil;
/*
* Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
*/
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
dsl_dataset_phys_t *ds_phys;
uint64_t old_bytes, new_bytes;
if (ds->ds_reserved == 0)
return (delta);
ds_phys = dsl_dataset_phys(ds);
old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
return (new_bytes - old_bytes);
}
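/*
 * Illustrative sketch (not part of this patch): parent_delta() only passes
 * on the part of a space delta that is not already absorbed by the
 * refreservation, by clamping the before/after unique-bytes values to the
 * reservation. reserved_parent_delta below restates this with plain
 * integers; e.g. with reserved = 100, unique = 80 and delta = +30, only
 * +10 propagates, since the first 20 bytes of growth were already covered
 * by the reservation.
 */
#include <stdint.h>

static int64_t
reserved_parent_delta(uint64_t unique, uint64_t reserved, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (reserved == 0)
		return (delta);
	old_bytes = unique > reserved ? unique : reserved;
	new_bytes = (unique + delta) > reserved ? (unique + delta) : reserved;
	return ((int64_t)(new_bytes - old_bytes));
}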
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int used = bp_get_dsize_sync(spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
int64_t delta;
spa_feature_t f;
dprintf_bp(bp, "ds=%p", ds);
ASSERT(dmu_tx_is_syncing(tx));
/* It could have been compressed away to nothing */
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return;
ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
if (ds == NULL) {
dsl_pool_mos_diduse_space(tx->tx_pool,
used, compressed, uncompressed);
return;
}
ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
dsl_dataset_phys(ds)->ds_referenced_bytes += used;
dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
dsl_dataset_phys(ds)->ds_unique_bytes += used;
if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
ds->ds_feature_activation[SPA_FEATURE_LARGE_BLOCKS] =
(void *)B_TRUE;
}
f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE) {
ASSERT3S(spa_feature_table[f].fi_type, ==,
ZFEATURE_TYPE_BOOLEAN);
ds->ds_feature_activation[f] = (void *)B_TRUE;
}
f = zio_compress_to_feature(BP_GET_COMPRESS(bp));
if (f != SPA_FEATURE_NONE) {
ASSERT3S(spa_feature_table[f].fi_type, ==,
ZFEATURE_TYPE_BOOLEAN);
ds->ds_feature_activation[f] = (void *)B_TRUE;
}
/*
* Track block for livelist, but ignore embedded blocks because
* they do not need to be freed.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg &&
!(BP_IS_EMBEDDED(bp))) {
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_allocs, bp);
}
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_transfer_space(ds->ds_dir, delta,
compressed, uncompressed, used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
/*
* Called when the specified segment has been remapped, and is thus no
* longer referenced in the head dataset. The vdev must be indirect.
*
* If the segment is referenced by a snapshot, put it on the remap deadlist.
* Otherwise, add this segment to the obsolete spacemap.
*/
void
dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
uint64_t size, uint64_t birth, dmu_tx_t *tx)
{
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(birth <= tx->tx_txg);
ASSERT(!ds->ds_is_snapshot);
if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
} else {
blkptr_t fakebp;
dva_t *dva = &fakebp.blk_dva[0];
ASSERT(ds != NULL);
mutex_enter(&ds->ds_remap_deadlist_lock);
if (!dsl_dataset_remap_deadlist_exists(ds)) {
dsl_dataset_create_remap_deadlist(ds, tx);
}
mutex_exit(&ds->ds_remap_deadlist_lock);
BP_ZERO(&fakebp);
fakebp.blk_birth = birth;
DVA_SET_VDEV(dva, vdev);
DVA_SET_OFFSET(dva, offset);
DVA_SET_ASIZE(dva, size);
dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, B_FALSE,
tx);
}
}
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
boolean_t async)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int used = bp_get_dsize_sync(spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(bp->blk_birth <= tx->tx_txg);
if (ds == NULL) {
dsl_free(tx->tx_pool, tx->tx_txg, bp);
dsl_pool_mos_diduse_space(tx->tx_pool,
-used, -compressed, -uncompressed);
return (used);
}
ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
ASSERT(!ds->ds_is_snapshot);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
/*
* Track block for livelist, but ignore embedded blocks because
* they do not need to be freed.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
bp->blk_birth > ds->ds_dir->dd_origin_txg &&
!(BP_IS_EMBEDDED(bp))) {
ASSERT(dsl_dir_is_clone(ds->ds_dir));
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LIVELIST));
bplist_append(&ds->ds_dir->dd_pending_frees, bp);
}
if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
int64_t delta;
dprintf_bp(bp, "freeing ds=%llu", (u_longlong_t)ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
mutex_enter(&ds->ds_lock);
ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
delta = parent_delta(ds, -used);
dsl_dataset_phys(ds)->ds_unique_bytes -= used;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_transfer_space(ds->ds_dir,
delta, -compressed, -uncompressed, -used,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
/*
* We are here as part of zio's write done callback,
* which means we're a zio interrupt thread. We can't
* call dsl_deadlist_insert() now because it may block
* waiting for I/O. Instead, put bp on the deferred
* queue and let dsl_pool_sync() finish the job.
*/
bplist_append(&ds->ds_pending_deadlist, bp);
} else {
dsl_deadlist_insert(&ds->ds_deadlist, bp, B_FALSE, tx);
}
ASSERT3U(ds->ds_prev->ds_object, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj);
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object && bp->blk_birth >
dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
mutex_enter(&ds->ds_prev->ds_lock);
dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
mutex_exit(&ds->ds_prev->ds_lock);
}
if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
dsl_dir_transfer_space(ds->ds_dir, used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
}
dsl_bookmark_block_killed(ds, bp, tx);
mutex_enter(&ds->ds_lock);
ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
mutex_exit(&ds->ds_lock);
return (used);
}
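/*
 * Illustrative sketch (not part of this patch): dsl_dataset_block_kill()
 * frees a block right away only when it was born after the most recent
 * snapshot; otherwise the snapshot still references it and the block goes
 * on the deadlist instead. classify_freed_block is a hypothetical helper
 * restating that test.
 */
#include <stdint.h>

enum freed_block_action { FREE_NOW, PUT_ON_DEADLIST };

static enum freed_block_action
classify_freed_block(uint64_t blk_birth_txg, uint64_t prev_snap_txg)
{
	/* born after the latest snapshot: only the head references it */
	if (blk_birth_txg > prev_snap_txg)
		return (FREE_NOW);
	/* otherwise it stays allocated until the referencing snapshots go away */
	return (PUT_ON_DEADLIST);
}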
struct feature_type_uint64_array_arg {
uint64_t length;
uint64_t *array;
};
static void
unload_zfeature(dsl_dataset_t *ds, spa_feature_t f)
{
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
kmem_free(ftuaa->array, ftuaa->length * sizeof (uint64_t));
kmem_free(ftuaa, sizeof (*ftuaa));
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
}
static int
load_zfeature(objset_t *mos, dsl_dataset_t *ds, spa_feature_t f)
{
int err = 0;
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
err = zap_contains(mos, ds->ds_object,
spa_feature_table[f].fi_guid);
if (err == 0) {
ds->ds_feature[f] = (void *)B_TRUE;
} else {
ASSERT3U(err, ==, ENOENT);
err = 0;
}
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
uint64_t int_size, num_int;
uint64_t *data;
err = zap_length(mos, ds->ds_object,
spa_feature_table[f].fi_guid, &int_size, &num_int);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
err = 0;
break;
}
ASSERT3U(int_size, ==, sizeof (uint64_t));
data = kmem_alloc(int_size * num_int, KM_SLEEP);
VERIFY0(zap_lookup(mos, ds->ds_object,
spa_feature_table[f].fi_guid, int_size, num_int, data));
struct feature_type_uint64_array_arg *ftuaa =
kmem_alloc(sizeof (*ftuaa), KM_SLEEP);
ftuaa->length = num_int;
ftuaa->array = data;
ds->ds_feature[f] = ftuaa;
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
return (err);
}
/*
* We have to release the fsid synchronously or we risk that a subsequent
* mount of the same dataset will fail to unique_insert the fsid. This
* failure would manifest itself as the fsid of this dataset changing
* between mounts, which makes NFS clients quite unhappy.
*/
static void
dsl_dataset_evict_sync(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
unique_remove(ds->ds_fsid_guid);
}
static void
dsl_dataset_evict_async(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
ds->ds_dbuf = NULL;
if (ds->ds_objset != NULL)
dmu_objset_evict(ds->ds_objset);
if (ds->ds_prev) {
dsl_dataset_rele(ds->ds_prev, ds);
ds->ds_prev = NULL;
}
dsl_bookmark_fini_ds(ds);
bplist_destroy(&ds->ds_pending_deadlist);
if (dsl_deadlist_is_open(&ds->ds_deadlist))
dsl_deadlist_close(&ds->ds_deadlist);
if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
dsl_deadlist_close(&ds->ds_remap_deadlist);
if (ds->ds_dir)
dsl_dir_async_rele(ds->ds_dir, ds);
ASSERT(!list_link_active(&ds->ds_synced_link));
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
unload_zfeature(ds, f);
}
list_destroy(&ds->ds_prop_cbs);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
}
int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
dsl_dataset_phys_t *headphys;
int err;
dmu_buf_t *headdbuf;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
if (ds->ds_snapname[0])
return (0);
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
return (0);
err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
FTAG, &headdbuf);
if (err != 0)
return (err);
headphys = headdbuf->db_data;
err = zap_value_search(dp->dp_meta_objset,
headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
if (err != 0 && zfs_recover == B_TRUE) {
err = 0;
(void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
"SNAPOBJ=%llu-ERR=%d",
(unsigned long long)ds->ds_object, err);
}
dmu_buf_rele(headdbuf, FTAG);
return (err);
}
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_lookup_norm(mos, snapobj, name, 8, 1,
value, mt, NULL, 0, NULL);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_lookup(mos, snapobj, name, 8, 1, value);
return (err);
}
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
dsl_dir_snap_cmtime_update(ds->ds_dir);
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_remove_norm(mos, snapobj, name, mt, tx);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_remove(mos, snapobj, name, tx);
if (err == 0 && adj_cnt)
dsl_fs_ss_count_adjust(ds->ds_dir, -1,
DD_FIELD_SNAPSHOT_COUNT, tx);
return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
{
dmu_buf_t *dbuf = ds->ds_dbuf;
boolean_t result = B_FALSE;
if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
ds->ds_object, DMU_BONUS_BLKID, tag)) {
if (ds == dmu_buf_get_user(dbuf))
result = B_TRUE;
else
dmu_buf_rele(dbuf, tag);
}
return (result);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
dsl_dataset_t **dsp)
{
objset_t *mos = dp->dp_meta_objset;
dmu_buf_t *dbuf;
dsl_dataset_t *ds;
int err;
dmu_object_info_t doi;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
if (err != 0)
return (err);
/* Make sure dsobj has the correct object type. */
dmu_object_info_from_db(dbuf, &doi);
if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
dmu_buf_rele(dbuf, tag);
return (SET_ERROR(EINVAL));
}
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
dsl_dataset_t *winner = NULL;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_dbuf = dbuf;
ds->ds_object = dsobj;
ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
list_link_init(&ds->ds_synced_link);
err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
NULL, ds, &ds->ds_dir);
if (err != 0) {
kmem_free(ds, sizeof (dsl_dataset_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_remap_deadlist_lock,
NULL, MUTEX_DEFAULT, NULL);
rrw_init(&ds->ds_bp_rwlock, B_FALSE);
zfs_refcount_create(&ds->ds_longholds);
bplist_create(&ds->ds_pending_deadlist);
list_create(&ds->ds_sendstreams, sizeof (dmu_sendstatus_t),
offsetof(dmu_sendstatus_t, dss_link));
list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_ds_node));
if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
spa_feature_t f;
for (f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET))
continue;
err = load_zfeature(mos, ds, f);
}
}
if (!ds->ds_is_snapshot) {
ds->ds_snapname[0] = '\0';
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev);
}
err = dsl_bookmark_init_ds(ds);
} else {
if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
err = dsl_dataset_get_snapname(ds);
if (err == 0 &&
dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
err = zap_count(
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_userrefs_obj,
&ds->ds_userrefs);
}
}
if (err == 0 && !ds->ds_is_snapshot) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
&ds->ds_reserved);
if (err == 0) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
&ds->ds_quota);
}
} else {
ds->ds_reserved = ds->ds_quota = 0;
}
if (err == 0 && ds->ds_dir->dd_crypto_obj != 0 &&
ds->ds_is_snapshot &&
zap_contains(mos, dsobj, DS_FIELD_IVSET_GUID) != 0) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
dsl_deadlist_open(&ds->ds_deadlist,
mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
uint64_t remap_deadlist_obj =
dsl_dataset_get_remap_deadlist_object(ds);
if (remap_deadlist_obj != 0) {
dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
remap_deadlist_obj);
}
dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
dsl_dataset_evict_async, &ds->ds_dbuf);
if (err == 0)
winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);
if (err != 0 || winner != NULL) {
bplist_destroy(&ds->ds_pending_deadlist);
dsl_deadlist_close(&ds->ds_deadlist);
if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
dsl_deadlist_close(&ds->ds_remap_deadlist);
dsl_bookmark_fini_ds(ds);
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
dsl_dir_rele(ds->ds_dir, ds);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
unload_zfeature(ds, f);
}
list_destroy(&ds->ds_prop_cbs);
list_destroy(&ds->ds_sendstreams);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
if (err != 0) {
dmu_buf_rele(dbuf, tag);
return (err);
}
ds = winner;
} else {
ds->ds_fsid_guid =
unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
if (ds->ds_fsid_guid !=
dsl_dataset_phys(ds)->ds_fsid_guid) {
zfs_dbgmsg("ds_fsid_guid changed from "
"%llx to %llx for pool %s dataset id %llu",
(long long)
dsl_dataset_phys(ds)->ds_fsid_guid,
(long long)ds->ds_fsid_guid,
spa_name(dp->dp_spa),
(u_longlong_t)dsobj);
}
}
}
ASSERT3P(ds->ds_dbuf, ==, dbuf);
ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
*dsp = ds;
return (0);
}
int
dsl_dataset_create_key_mapping(dsl_dataset_t *ds)
{
dsl_dir_t *dd = ds->ds_dir;
if (dd->dd_crypto_obj == 0)
return (0);
return (spa_keystore_create_mapping(dd->dd_pool->dp_spa,
ds, ds, &ds->ds_key_mapping));
}
int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
{
int err;
err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
if (err != 0)
return (err);
ASSERT3P(*dsp, !=, NULL);
if (flags & DS_HOLD_FLAG_DECRYPT) {
err = dsl_dataset_create_key_mapping(*dsp);
if (err != 0)
dsl_dataset_rele(*dsp, tag);
}
return (err);
}
int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
dsl_dir_t *dd;
const char *snapname;
uint64_t obj;
int err = 0;
dsl_dataset_t *ds;
err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
if (err != 0)
return (err);
ASSERT(dsl_pool_config_held(dp));
obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
if (obj != 0)
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
else
err = SET_ERROR(ENOENT);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
dsl_dataset_t *snap_ds;
if (*snapname++ != '@') {
dsl_dataset_rele_flags(ds, flags, tag);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ENOENT));
}
dprintf("looking for snapshot '%s'\n", snapname);
err = dsl_dataset_snap_lookup(ds, snapname, &obj);
if (err == 0) {
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
&snap_ds);
}
dsl_dataset_rele_flags(ds, flags, tag);
if (err == 0) {
mutex_enter(&snap_ds->ds_lock);
if (snap_ds->ds_snapname[0] == 0)
(void) strlcpy(snap_ds->ds_snapname, snapname,
sizeof (snap_ds->ds_snapname));
mutex_exit(&snap_ds->ds_lock);
ds = snap_ds;
}
}
if (err == 0)
*dsp = ds;
dsl_dir_rele(dd, FTAG);
return (err);
}
int
dsl_dataset_hold(dsl_pool_t *dp, const char *name, void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
}
static int
dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag, override)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
*dsp = NULL;
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_FALSE, dsp));
}
int
dsl_dataset_own_obj_force(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_TRUE, dsp));
}
static int
dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag, override)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own_force(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_TRUE, dsp));
}
int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_FALSE, dsp));
}
/*
* See the comment above dsl_pool_hold() for details. In summary, a long
* hold is used to prevent destruction of a dataset while the pool hold
* is dropped, allowing other concurrent operations (e.g. spa_sync()).
*
* The dataset and pool must be held when this function is called. After it
* is called, the pool hold may be released while the dataset is still held
* and accessed.
*/
void
dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
{
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
(void) zfs_refcount_add(&ds->ds_longholds, tag);
}
void
dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
{
(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}
/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
return (!zfs_refcount_is_zero(&ds->ds_longholds));
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
if (ds == NULL) {
(void) strlcpy(name, "mos", ZFS_MAX_DATASET_NAME_LEN);
} else {
dsl_dir_name(ds->ds_dir, name);
VERIFY0(dsl_dataset_get_snapname(ds));
if (ds->ds_snapname[0]) {
VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
/*
* We use a "recursive" mutex so that we
* can call dprintf_ds() with ds_lock held.
*/
if (!MUTEX_HELD(&ds->ds_lock)) {
mutex_enter(&ds->ds_lock);
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&ds->ds_lock);
} else {
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
}
}
}
}
int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
VERIFY0(dsl_dataset_get_snapname(ds));
mutex_enter(&ds->ds_lock);
int len = strlen(ds->ds_snapname);
mutex_exit(&ds->ds_lock);
/* add '@' if ds is a snap */
if (len > 0)
len++;
len += dsl_dir_namelen(ds->ds_dir);
return (len);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
dmu_buf_rele(ds->ds_dbuf, tag);
}
void
dsl_dataset_remove_key_mapping(dsl_dataset_t *ds)
{
dsl_dir_t *dd = ds->ds_dir;
if (dd == NULL || dd->dd_crypto_obj == 0)
return;
(void) spa_keystore_remove_mapping(dd->dd_pool->dp_spa,
ds->ds_object, ds);
}
void
dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
{
if (flags & DS_HOLD_FLAG_DECRYPT)
dsl_dataset_remove_key_mapping(ds);
dsl_dataset_rele(ds, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
{
ASSERT3P(ds->ds_owner, ==, tag);
ASSERT(ds->ds_dbuf != NULL);
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
dsl_dataset_long_rele(ds, tag);
dsl_dataset_rele_flags(ds, flags, tag);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, void *tag, boolean_t override)
{
boolean_t gotit = FALSE;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_lock);
if (ds->ds_owner == NULL && (override || !(DS_IS_INCONSISTENT(ds) ||
(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS) &&
!zfs_allow_redacted_dataset_mount)))) {
ds->ds_owner = tag;
dsl_dataset_long_hold(ds, tag);
gotit = TRUE;
}
mutex_exit(&ds->ds_lock);
return (gotit);
}
boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
boolean_t rv;
mutex_enter(&ds->ds_lock);
rv = (ds->ds_owner != NULL);
mutex_exit(&ds->ds_lock);
return (rv);
}
static boolean_t
zfeature_active(spa_feature_t f, void *arg)
{
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN: {
boolean_t val = (boolean_t)(uintptr_t)arg;
ASSERT(val == B_FALSE || val == B_TRUE);
return (val);
}
case ZFEATURE_TYPE_UINT64_ARRAY:
/*
* In this case, arg is a uint64_t array. The feature is active
* if the array is non-null.
*/
return (arg != NULL);
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
return (B_FALSE);
}
}
boolean_t
dsl_dataset_feature_is_active(dsl_dataset_t *ds, spa_feature_t f)
{
return (zfeature_active(f, ds->ds_feature[f]));
}
/*
* The buffers passed out by this function are references to internal buffers;
* they should not be freed by callers of this function, and they should not be
* used after the dataset has been released.
*/
boolean_t
dsl_dataset_get_uint64_array_feature(dsl_dataset_t *ds, spa_feature_t f,
uint64_t *outlength, uint64_t **outp)
{
VERIFY(spa_feature_table[f].fi_type & ZFEATURE_TYPE_UINT64_ARRAY);
if (!dsl_dataset_feature_is_active(ds, f)) {
return (B_FALSE);
}
struct feature_type_uint64_array_arg *ftuaa = ds->ds_feature[f];
*outp = ftuaa->array;
*outlength = ftuaa->length;
return (B_TRUE);
}
void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, void *arg,
dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t zero = 0;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
spa_feature_incr(spa, f, tx);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
switch (spa_feature_table[f].fi_type) {
case ZFEATURE_TYPE_BOOLEAN:
ASSERT3S((boolean_t)(uintptr_t)arg, ==, B_TRUE);
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (zero), 1, &zero, tx));
break;
case ZFEATURE_TYPE_UINT64_ARRAY:
{
struct feature_type_uint64_array_arg *ftuaa = arg;
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (uint64_t), ftuaa->length, ftuaa->array, tx));
break;
}
default:
panic("Invalid zfeature type %d", spa_feature_table[f].fi_type);
}
}
static void
dsl_dataset_deactivate_feature_impl(dsl_dataset_t *ds, spa_feature_t f,
dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t dsobj = ds->ds_object;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
spa_feature_decr(spa, f, tx);
ds->ds_feature[f] = NULL;
}
void
dsl_dataset_deactivate_feature(dsl_dataset_t *ds, spa_feature_t f, dmu_tx_t *tx)
{
unload_zfeature(ds, f);
dsl_dataset_deactivate_feature_impl(ds, f, tx);
}
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj;
objset_t *mos = dp->dp_meta_objset;
if (origin == NULL)
origin = dp->dp_origin_snap;
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_snapnames_zapobj =
zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
DMU_OT_NONE, 0, tx);
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
if (origin == NULL) {
dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
} else {
dsl_dataset_t *ohds; /* head of the origin snapshot */
dsphys->ds_prev_snap_obj = origin->ds_object;
dsphys->ds_prev_snap_txg =
dsl_dataset_phys(origin)->ds_creation_txg;
dsphys->ds_referenced_bytes =
dsl_dataset_phys(origin)->ds_referenced_bytes;
dsphys->ds_compressed_bytes =
dsl_dataset_phys(origin)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(origin)->ds_uncompressed_bytes;
rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
rrw_exit(&origin->ds_bp_rwlock, FTAG);
/*
* Inherit flags that describe the dataset's contents
* (INCONSISTENT) or properties (Case Insensitive).
*/
dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
(DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, origin->ds_feature[f])) {
dsl_dataset_activate_feature(dsobj, f,
origin->ds_feature[f], tx);
}
}
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_dataset_phys(origin)->ds_num_children++;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
FTAG, &ohds));
dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
dsl_dataset_rele(ohds, FTAG);
if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
dsl_dataset_phys(origin)->ds_next_clones_obj =
zap_create(mos,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dataset_phys(origin)->ds_next_clones_obj,
dsobj, tx));
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos,
DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dir_phys(origin->ds_dir)->dd_clones,
dsobj, tx));
}
}
/* handle encryption */
dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
dmu_buf_rele(dbuf, FTAG);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;
return (dsobj);
}
static void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
dsl_pool_t *dp = ds->ds_dir->dd_pool;
zio_t *zio;
bzero(&os->os_zil_header, sizeof (os->os_zil_header));
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
/* dsl_dataset_sync_done will drop this reference. */
dmu_buf_add_ref(ds->ds_dbuf, ds);
dsl_dataset_sync_done(ds, tx);
}
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
dsl_pool_t *dp = pdd->dd_pool;
uint64_t dsobj, ddobj;
dsl_dir_t *dd;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(lastname[0] != '@');
/*
* Filesystems will eventually have their origin set to dp_origin_snap,
* but that's taken care of in dsl_dataset_create_sync_dd. When
* creating a filesystem, this function is called with origin equal to
* NULL.
*/
if (origin != NULL)
ASSERT3P(origin, !=, dp->dp_origin_snap);
ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));
dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
flags & ~DS_CREATE_FLAG_NODIRTY, tx);
dsl_deleg_set_create_perms(dd, tx, cr);
/*
* If we are creating a clone and the livelist feature is enabled,
* add the entry DD_FIELD_LIVELIST to ZAP.
*/
if (origin != NULL &&
spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LIVELIST)) {
objset_t *mos = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
uint64_t obj = dsl_deadlist_alloc(mos, tx);
VERIFY0(zap_add(mos, dd->dd_object, DD_FIELD_LIVELIST,
sizeof (uint64_t), 1, &obj, tx));
spa_feature_incr(dp->dp_spa, SPA_FEATURE_LIVELIST, tx);
}
/*
* Since we're creating a new node we know it's a leaf, so we can
* initialize the counts if the limit feature is active.
*/
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
uint64_t cnt = 0;
objset_t *os = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (cnt), 1, &cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (cnt), 1, &cnt, tx));
}
dsl_dir_rele(dd, FTAG);
/*
* If we are creating a clone, make sure we zero out any stale
* data from the origin snapshot's zil header.
*/
if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_zero_zil(ds, tx);
dsl_dataset_rele(ds, FTAG);
}
return (dsobj);
}
/*
* The unique space in the head dataset can be calculated by subtracting
* the space used in the most recent snapshot, that is still being used
* in this file system, from the space currently in use. To figure out
* the space in the most recent snapshot still in use, we need to take
* the total space used in the snapshot and subtract out the space that
* has been freed up since the snapshot was taken.
*/
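/*
 * Restated as a formula (illustrative, not from the original source):
 *   unique = referenced - (snap_referenced - deadlist_used)
 * where snap_referenced is the most recent snapshot's
 * ds_referenced_bytes and deadlist_used is the space freed since
 * that snapshot was taken.
 */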
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
uint64_t mrs_used;
uint64_t dlused, dlcomp, dluncomp;
ASSERT(!ds->ds_is_snapshot);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
else
mrs_used = 0;
dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
ASSERT3U(dlused, <=, mrs_used);
dsl_dataset_phys(ds)->ds_unique_bytes =
dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);
if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t count __maybe_unused;
int err;
ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
obj, tx);
/*
* The err should not be ENOENT, but a bug in a previous version
* of the code could cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a missing entry.
* If we knew that the pool was created after
* SPA_VERSION_NEXT_CLONES, we could assert that it isn't
* ENOENT. However, at least we can check that we don't have
* too many entries in the next_clones_obj even after failing to
* remove this one.
*/
if (err != ENOENT)
VERIFY0(err);
ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
return (&dsl_dataset_phys(ds)->ds_bp);
}
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp;
if (ds == NULL) /* this is the meta-objset */
return;
ASSERT(ds->ds_objset != NULL);
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
panic("dirtying snapshot!");
/* Must not dirty a dataset in the same txg where it got snapshotted. */
ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dp = ds->ds_dir->dd_pool;
if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
objset_t *os = ds->ds_objset;
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, ds);
/* if this dataset is encrypted, grab a reference to the DCK */
if (ds->ds_dir->dd_crypto_obj != 0 &&
!os->os_raw_receive &&
!os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_add_ref(ds->ds_key_mapping, ds);
}
}
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t asize;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* If there's an fs-only reservation, any blocks that might become
* owned by the snapshot dataset must be accommodated by space
* outside of the reservation.
*/
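/*
 * Illustrative example (hypothetical numbers): with a 10G
 * refreservation and 3G of unique data, taking the snapshot can
 * move up to MIN(3G, 10G) = 3G of blocks out from under the
 * reservation, so at least that much space must be available
 * outside of it or we return ENOSPC below.
 */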
ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* Propagate any reserved space for this snapshot to other
* snapshot checks in this sync group.
*/
if (asize > 0)
dsl_dir_willuse_space(ds->ds_dir, asize, tx);
return (0);
}
int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr, proc_t *proc)
{
int error;
uint64_t value;
ds->ds_trysnap_txg = tx->tx_txg;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* We don't allow multiple snapshots of the same txg. If there
* is already one, try again.
*/
if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
return (SET_ERROR(EAGAIN));
/*
* Check for conflicting snapshot name.
*/
error = dsl_dataset_snap_lookup(ds, snapname, &value);
if (error == 0)
return (SET_ERROR(EEXIST));
if (error != ENOENT)
return (error);
/*
* We don't allow taking snapshots of inconsistent datasets, such as
* those into which we are currently receiving. However, if we are
* creating this snapshot as part of a receive, this check will be
* executed atomically with respect to the completion of the receive
* itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
* case we ignore this, knowing it will be fixed up for us shortly in
* dmu_recv_end_sync().
*/
if (!recv && DS_IS_INCONSISTENT(ds))
return (SET_ERROR(EBUSY));
/*
* Skip the check for temporary snapshots or if we have already checked
* the counts in dsl_dataset_snapshot_check. This means we really only
* check the count here when we're receiving a stream.
*/
if (cnt != 0 && cr != NULL) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr, proc);
if (error != 0)
return (error);
}
error = dsl_dataset_snapshot_reserve_space(ds, tx);
if (error != 0)
return (error);
return (0);
}
int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
int rv = 0;
/*
* Pre-compute how many total new snapshots will be created for each
* level in the tree and below. This is needed for validating the
* snapshot limit when either taking a recursive snapshot or when
* taking multiple snapshots.
*
* The problem is that the counts are not actually adjusted when
* we are checking, only when we finally sync. For a single snapshot,
* this is easy, the count will increase by 1 at each node up the tree,
* but it's more complicated for the recursive/multiple snapshot case.
*
* The dsl_fs_ss_limit_check function does recursively check the count
* at each level up the tree but since it is validating each snapshot
* independently we need to be sure that we are validating the complete
* count for the entire set of snapshots. We do this by rolling up the
* counts for each component of the name into an nvlist and then
* checking each of those cases with the aggregated count.
*
* This approach properly handles not only the recursive snapshot
* case (where we get all of those on the ddsa_snaps list) but also
* the sibling case (e.g. snapshot a/b and a/c so that we will also
* validate the limit on 'a' using a count of 2).
*
* We validate the snapshot names in the third loop and only report
* name errors once.
*/
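/*
 * Illustrative example (hypothetical names): snapshotting both
 * pool/a/b@s and pool/a/c@s rolls up to
 *   cnt_track = { "pool/a/b": 1, "pool/a/c": 1, "pool/a": 2, "pool": 2 }
 * so the snapshot limit on "pool/a" (and "pool") is validated
 * against a count of 2, not 1.
 */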
if (dmu_tx_is_syncing(tx)) {
char *nm;
nvlist_t *cnt_track = NULL;
cnt_track = fnvlist_alloc();
nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/* Rollup aggregated counts into the cnt_track list */
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL;
pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
char *pdelim;
uint64_t val;
(void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
pdelim = strchr(nm, '@');
if (pdelim == NULL)
continue;
*pdelim = '\0';
do {
if (nvlist_lookup_uint64(cnt_track, nm,
&val) == 0) {
/* update existing entry */
fnvlist_add_uint64(cnt_track, nm,
val + 1);
} else {
/* add to list */
fnvlist_add_uint64(cnt_track, nm, 1);
}
pdelim = strrchr(nm, '/');
if (pdelim != NULL)
*pdelim = '\0';
} while (pdelim != NULL);
}
kmem_free(nm, MAXPATHLEN);
/* Check aggregated counts at each level */
for (pair = nvlist_next_nvpair(cnt_track, NULL);
pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
int error = 0;
char *name;
uint64_t cnt = 0;
dsl_dataset_t *ds;
name = nvpair_name(pair);
cnt = fnvpair_value_uint64(pair);
ASSERT(cnt > 0);
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
ddsa->ddsa_cr, ddsa->ddsa_proc);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL)
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
rv = error;
/* only report one error for this check */
break;
}
}
nvlist_free(cnt_track);
}
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
int error = 0;
dsl_dataset_t *ds;
char *name, *atp = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
if (error == 0) {
atp = strchr(name, '@');
if (atp == NULL)
error = SET_ERROR(EINVAL);
if (error == 0)
(void) strlcpy(dsname, name, atp - name + 1);
}
if (error == 0)
error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == 0) {
/* passing 0/NULL skips dsl_fs_ss_limit_check */
error = dsl_dataset_snapshot_check_impl(ds,
atp + 1, tx, B_FALSE, 0, NULL, NULL);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL) {
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
}
rv = error;
}
}
return (rv);
}
void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj, crtxg;
objset_t *mos = dp->dp_meta_objset;
static zil_header_t zero_zil __maybe_unused;
objset_t *os __maybe_unused;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
/*
* If we are on an old pool, the zil must not be active, in which
* case it will be zeroed. Usually zil_suspend() accomplishes this.
*/
ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
dmu_objset_from_ds(ds, &os) != 0 ||
bcmp(&os->os_phys->os_zil_header, &zero_zil,
sizeof (zero_zil)) == 0);
/* Should not snapshot a dirty dataset. */
ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
ds, tx->tx_txg));
dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);
/*
* The origin's ds_creation_txg has to be < TXG_INITIAL
*/
if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
crtxg = 1;
else
crtxg = tx->tx_txg;
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsphys->ds_next_snap_obj = ds->ds_object;
dsphys->ds_num_children = 1;
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = crtxg;
dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(ds)->ds_uncompressed_bytes;
dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dmu_buf_rele(dbuf, FTAG);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, ds->ds_feature[f])) {
dsl_dataset_activate_feature(dsobj, f,
ds->ds_feature[f], tx);
}
}
ASSERT3U(ds->ds_prev != 0, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
if (ds->ds_prev) {
uint64_t next_clones_obj =
dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object ||
dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
} else if (next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds->ds_prev,
dsphys->ds_next_snap_obj, tx);
VERIFY0(zap_add_int(mos,
next_clones_obj, dsobj, tx));
}
}
/*
* If we have a reference-reservation on this dataset, we will
* need to increase the amount of refreservation being charged
* since our unique space is going to zero.
*/
if (ds->ds_reserved) {
int64_t delta;
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
ds->ds_reserved);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
delta, 0, 0, tx);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj =
dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_open(&ds->ds_deadlist, mos,
dsl_dataset_phys(ds)->ds_deadlist_obj);
dsl_deadlist_add_key(&ds->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
dsl_bookmark_snapshotted(ds, tx);
if (dsl_dataset_remap_deadlist_exists(ds)) {
uint64_t remap_deadlist_obj =
dsl_dataset_get_remap_deadlist_object(ds);
/*
* Move the remap_deadlist to the snapshot. The head
* will create a new remap deadlist on demand, from
* dsl_dataset_block_remapped().
*/
dsl_dataset_unset_remap_deadlist_object(ds, tx);
dsl_deadlist_close(&ds->ds_remap_deadlist);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
}
/*
* Create an ivset guid for this snapshot if the dataset is
* encrypted. This may be overridden by a raw receive. A
* previous implementation of this code did not have this
* field as part of the on-disk format for ZFS encryption
* (see errata #4). As part of the remediation for this
* issue, we ask the user to enable the bookmark_v2 feature
* which is now a dependency of the encryption feature. We
* use this as a heuristic to determine when the user has
* elected to correct any datasets created with the old code.
* As a result, we only do this step if the bookmark_v2
* feature is enabled, which limits the number of states a
* given pool / dataset can be in with regard to
* correcting the issue.
*/
if (ds->ds_dir->dd_crypto_obj != 0 &&
spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2)) {
uint64_t ivset_guid = unique_create();
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, DS_FIELD_IVSET_GUID,
sizeof (ivset_guid), 1, &ivset_guid, tx));
}
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
dsl_dataset_phys(ds)->ds_unique_bytes = 0;
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
snapname, 8, 1, &dsobj, tx));
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));
dsl_scan_ds_snapshotted(ds, tx);
dsl_dir_snap_cmtime_update(ds->ds_dir);
spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, " ");
}
void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
dsl_dataset_t *ds;
char *name, *atp;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
atp = strchr(name, '@');
(void) strlcpy(dsname, name, atp - name + 1);
VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
if (ddsa->ddsa_props != NULL) {
dsl_props_set_sync_impl(ds->ds_prev,
ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
}
dsl_dataset_rele(ds, FTAG);
}
}
/*
* The snapshots must all be in the same pool.
* All-or-nothing: if there are any failures, nothing will be modified.
*/
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
dsl_dataset_snapshot_arg_t ddsa;
nvpair_t *pair;
boolean_t needsuspend;
int error;
spa_t *spa;
char *firstname;
nvlist_t *suspended = NULL;
pair = nvlist_next_nvpair(snaps, NULL);
if (pair == NULL)
return (0);
firstname = nvpair_name(pair);
error = spa_open(firstname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
suspended = fnvlist_alloc();
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *snapname = nvpair_name(pair);
char *atp;
void *cookie;
atp = strchr(snapname, '@');
if (atp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
(void) strlcpy(fsname, snapname, atp - snapname + 1);
error = zil_suspend(fsname, &cookie);
if (error != 0)
break;
fnvlist_add_uint64(suspended, fsname,
(uintptr_t)cookie);
}
}
ddsa.ddsa_snaps = snaps;
ddsa.ddsa_props = props;
ddsa.ddsa_errors = errors;
ddsa.ddsa_cr = CRED();
ddsa.ddsa_proc = curproc;
if (error == 0) {
error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
dsl_dataset_snapshot_sync, &ddsa,
fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
}
if (suspended != NULL) {
for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
pair = nvlist_next_nvpair(suspended, pair)) {
zil_resume((void *)(uintptr_t)
fnvpair_value_uint64(pair));
}
fnvlist_free(suspended);
}
if (error == 0) {
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
zvol_create_minor(nvpair_name(pair));
}
}
return (error);
}
typedef struct dsl_dataset_snapshot_tmp_arg {
const char *ddsta_fsname;
const char *ddsta_snapname;
minor_t ddsta_cleanup_minor;
const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;
static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* NULL cred means no limit check for tmp snapshot */
error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
tx, B_FALSE, 0, NULL, NULL);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
B_TRUE, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag)
{
dsl_dataset_snapshot_tmp_arg_t ddsta;
int error;
spa_t *spa;
boolean_t needsuspend;
void *cookie;
ddsta.ddsta_fsname = fsname;
ddsta.ddsta_snapname = snapname;
ddsta.ddsta_cleanup_minor = cleanup_minor;
ddsta.ddsta_htag = htag;
error = spa_open(fsname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
error = zil_suspend(fsname, &cookie);
if (error != 0)
return (error);
}
error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);
if (needsuspend)
zil_resume(cookie);
return (error);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL);
ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
/*
* in case we had to change ds_fsid_guid when we opened it,
* sync it out now.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;
if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
&ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
&ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
&ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
}
dmu_objset_sync(ds->ds_objset, zio, tx);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (zfeature_active(f, ds->ds_feature_activation[f])) {
if (zfeature_active(f, ds->ds_feature[f]))
continue;
dsl_dataset_activate_feature(ds->ds_object, f,
ds->ds_feature_activation[f], tx);
ds->ds_feature[f] = ds->ds_feature_activation[f];
}
}
}
/*
* Check if the percentage of blocks shared between the clone and the
* snapshot (as opposed to those that are clone only) is below a certain
* threshold.
*/
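/*
 * Illustrative example (hypothetical numbers): if the clone's dsl
 * dir uses 900M of a 1000M-referenced dataset, only
 * 100 * (1000M - 900M) / 1000M = 10% is still shared with the
 * origin snapshot; the livelist is disabled once this drops to
 * zfs_livelist_min_percent_shared or below.
 */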
static boolean_t
dsl_livelist_should_disable(dsl_dataset_t *ds)
{
uint64_t used, referenced;
int percent_shared;
used = dsl_dir_get_usedds(ds->ds_dir);
referenced = dsl_get_referenced(ds);
ASSERT3U(referenced, >=, 0);
ASSERT3U(used, >=, 0);
if (referenced == 0)
return (B_FALSE);
percent_shared = (100 * (referenced - used)) / referenced;
if (percent_shared <= zfs_livelist_min_percent_shared)
return (B_TRUE);
return (B_FALSE);
}
/*
* Check if it is possible to combine two livelist entries into one.
* This is the case if the combined number of 'live' blkptrs (ALLOCs that
* don't have a matching FREE) is under the maximum sublist size.
* We check this by subtracting twice the total number of frees from the total
* number of blkptrs. FREEs are counted twice because each FREE blkptr
* will cancel out an ALLOC blkptr when the livelist is processed.
*/
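/*
 * Illustrative example (hypothetical numbers): if the two bpobjs
 * together hold 1000 blkptrs of which 300 are FREEs, condensing
 * them would leave 1000 - 2 * 300 = 400 live ALLOCs, so the pair
 * is condensed only if 400 < zfs_livelist_max_entries.
 */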
static boolean_t
dsl_livelist_should_condense(dsl_deadlist_entry_t *first,
dsl_deadlist_entry_t *next)
{
uint64_t total_free = first->dle_bpobj.bpo_phys->bpo_num_freed +
next->dle_bpobj.bpo_phys->bpo_num_freed;
uint64_t total_entries = first->dle_bpobj.bpo_phys->bpo_num_blkptrs +
next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
if ((total_entries - (2 * total_free)) < zfs_livelist_max_entries)
return (B_TRUE);
return (B_FALSE);
}
typedef struct try_condense_arg {
spa_t *spa;
dsl_dataset_t *ds;
} try_condense_arg_t;
/*
* Iterate over the livelist entries, searching for a pair to condense.
* A nonzero return value means stop, 0 means keep looking.
*/
static int
dsl_livelist_try_condense(void *arg, dsl_deadlist_entry_t *first)
{
try_condense_arg_t *tca = arg;
spa_t *spa = tca->spa;
dsl_dataset_t *ds = tca->ds;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
dsl_deadlist_entry_t *next;
/* The condense thread has not yet been created at import */
if (spa->spa_livelist_condense_zthr == NULL)
return (1);
/* A condense is already in progress */
if (spa->spa_to_condense.ds != NULL)
return (1);
next = AVL_NEXT(&ll->dl_tree, &first->dle_node);
/* The livelist has only one entry - don't condense it */
if (next == NULL)
return (1);
/* Next is the newest entry - don't condense it */
if (AVL_NEXT(&ll->dl_tree, &next->dle_node) == NULL)
return (1);
/* This pair is not ready to condense but keep looking */
if (!dsl_livelist_should_condense(first, next))
return (0);
/*
* Add a ref to prevent the dataset from being evicted while
* the condense zthr or synctask are running. Ref will be
* released at the end of the condense synctask
*/
dmu_buf_add_ref(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = ds;
spa->spa_to_condense.first = first;
spa->spa_to_condense.next = next;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
zthr_wakeup(spa->spa_livelist_condense_zthr);
return (1);
}
static void
dsl_flush_pending_livelist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_dir_t *dd = ds->ds_dir;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
dsl_deadlist_entry_t *last = dsl_deadlist_last(&dd->dd_livelist);
/* Check if we need to add a new sub-livelist */
if (last == NULL) {
/* The livelist is empty */
dsl_deadlist_add_key(&dd->dd_livelist,
tx->tx_txg - 1, tx);
} else if (spa_sync_pass(spa) == 1) {
/*
* Check if the newest entry is full. If it is, make a new one.
* We only do this once per sync because we could overfill a
* sublist in one sync pass and don't want to add another entry
* for a txg that is already represented. This ensures that
* blkptrs born in the same txg are stored in the same sublist.
*/
bpobj_t bpobj = last->dle_bpobj;
uint64_t all = bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t free = bpobj.bpo_phys->bpo_num_freed;
uint64_t alloc = all - free;
if (alloc > zfs_livelist_max_entries) {
dsl_deadlist_add_key(&dd->dd_livelist,
tx->tx_txg - 1, tx);
}
}
/* Insert each entry into the on-disk livelist */
bplist_iterate(&dd->dd_pending_allocs,
dsl_deadlist_insert_alloc_cb, &dd->dd_livelist, tx);
bplist_iterate(&dd->dd_pending_frees,
dsl_deadlist_insert_free_cb, &dd->dd_livelist, tx);
/* Attempt to condense every pair of adjacent entries */
try_condense_arg_t arg = {
.spa = spa,
.ds = ds
};
dsl_deadlist_iterate(&dd->dd_livelist, dsl_livelist_try_condense,
&arg);
}
void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os = ds->ds_objset;
bplist_iterate(&ds->ds_pending_deadlist,
dsl_deadlist_insert_alloc_cb, &ds->ds_deadlist, tx);
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
dsl_flush_pending_livelist(ds, tx);
if (dsl_livelist_should_disable(ds)) {
dsl_dir_remove_livelist(ds->ds_dir, tx, B_TRUE);
}
}
dsl_bookmark_sync_done(ds, tx);
multilist_destroy(&os->os_synced_dnodes);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
else
ASSERT0(os->os_next_write_raw[tx->tx_txg & TXG_MASK]);
ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));
dmu_buf_rele(ds->ds_dbuf, ds);
}
int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
uint64_t count = 0;
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* There may be missing entries in ds_next_clones_obj
* due to a bug in a previous version of the code.
* Only trust it if it has the right number of entries.
*/
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
}
if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
return (SET_ERROR(ENOENT));
}
for (zap_cursor_init(&zc, mos,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *clone;
char buf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
za.za_first_integer, FTAG, &clone));
dsl_dir_name(clone->ds_dir, buf);
fnvlist_add_boolean(val, buf);
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(&zc);
return (0);
}
void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
nvlist_t *propval = fnvlist_alloc();
nvlist_t *val = fnvlist_alloc();
if (get_clones_stat_impl(ds, val) == 0) {
fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
propval);
}
nvlist_free(val);
nvlist_free(propval);
}
/*
* Returns a string that represents the receive resume stats token. It should
* be freed with kmem_strfree().
*/
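/*
 * Roughly, the token built below has the form
 *   "<version>-<fletcher4 checksum word 0, hex>-<packed size, hex>-
 *    <hex dump of the gzip-compressed packed nvlist>"
 * as produced by the kmem_asprintf() call near the end of this
 * function (restated here for readability).
 */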
char *
get_receive_resume_stats_impl(dsl_dataset_t *ds)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
if (dsl_dataset_has_resume_receive_state(ds)) {
char *str;
void *packed;
uint8_t *compressed;
uint64_t val;
nvlist_t *token_nv = fnvlist_alloc();
size_t packed_size, compressed_size;
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "fromguid", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "object", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "offset", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "bytes", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "toguid", val);
}
char buf[MAXNAMELEN];
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
fnvlist_add_string(token_nv, "toname", buf);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_LARGEBLOCK) == 0) {
fnvlist_add_boolean(token_nv, "largeblockok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_EMBEDOK) == 0) {
fnvlist_add_boolean(token_nv, "embedok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_COMPRESSOK) == 0) {
fnvlist_add_boolean(token_nv, "compressok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_RAWOK) == 0) {
fnvlist_add_boolean(token_nv, "rawok");
}
if (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t num_redact_snaps;
uint64_t *redact_snaps;
VERIFY(dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &num_redact_snaps,
&redact_snaps));
fnvlist_add_uint64_array(token_nv, "redact_snaps",
redact_snaps, num_redact_snaps);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS) == 0) {
uint64_t num_redact_snaps, int_size;
uint64_t *redact_snaps;
VERIFY0(zap_length(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, &int_size,
&num_redact_snaps));
ASSERT3U(int_size, ==, sizeof (uint64_t));
redact_snaps = kmem_alloc(int_size * num_redact_snaps,
KM_SLEEP);
VERIFY0(zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, int_size,
num_redact_snaps, redact_snaps));
fnvlist_add_uint64_array(token_nv, "book_redact_snaps",
redact_snaps, num_redact_snaps);
kmem_free(redact_snaps, int_size * num_redact_snaps);
}
packed = fnvlist_pack(token_nv, &packed_size);
fnvlist_free(token_nv);
compressed = kmem_alloc(packed_size, KM_SLEEP);
compressed_size = gzip_compress(packed, compressed,
packed_size, packed_size, 6);
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, compressed_size, &cksum);
size_t alloc_size = compressed_size * 2 + 1;
str = kmem_alloc(alloc_size, KM_SLEEP);
for (int i = 0; i < compressed_size; i++) {
size_t offset = i * 2;
(void) snprintf(str + offset, alloc_size - offset,
"%02x", compressed[i]);
}
str[compressed_size * 2] = '\0';
char *propval = kmem_asprintf("%u-%llx-%llx-%s",
ZFS_SEND_RESUME_TOKEN_VERSION,
(longlong_t)cksum.zc_word[0],
(longlong_t)packed_size, str);
kmem_free(packed, packed_size);
kmem_free(str, alloc_size);
kmem_free(compressed, packed_size);
return (propval);
}
return (kmem_strdup(""));
}
/*
* Returns a string that represents the receive resume stats token of the
* dataset's child. It should be freed with kmem_strfree().
*/
char *
get_child_receive_stats(dsl_dataset_t *ds)
{
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, recvname);
if (strlcat(recvname, "/", sizeof (recvname)) <
sizeof (recvname) &&
strlcat(recvname, recv_clone_name, sizeof (recvname)) <
sizeof (recvname) &&
dsl_dataset_hold(ds->ds_dir->dd_pool, recvname, FTAG,
&recv_ds) == 0) {
char *propval = get_receive_resume_stats_impl(recv_ds);
dsl_dataset_rele(recv_ds, FTAG);
return (propval);
}
return (kmem_strdup(""));
}
static void
get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
char *propval = get_receive_resume_stats_impl(ds);
if (strcmp(propval, "") != 0) {
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_RECEIVE_RESUME_TOKEN, propval);
} else {
char *childval = get_child_receive_stats(ds);
if (strcmp(childval, "") != 0) {
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_RECEIVE_RESUME_TOKEN, childval);
}
kmem_strfree(childval);
}
kmem_strfree(propval);
}
uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
(dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
dsl_dataset_phys(ds)->ds_compressed_bytes);
return (ratio);
}
uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}
uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_get_refratio(ds));
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_compressratio(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_dataset_phys(ds)->ds_unique_bytes);
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_used(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_time);
}
uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_txg);
}
uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
return (ds->ds_quota);
}
uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
return (ds->ds_reserved);
}
uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_guid);
}
uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_unique_bytes);
}
uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
return (ds->ds_object);
}
uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
return (ds->ds_userrefs);
}
uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}
uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}
uint64_t
dsl_get_numclones(dsl_dataset_t *ds)
{
ASSERT(ds->ds_is_snapshot);
return (dsl_dataset_phys(ds)->ds_num_children - 1);
}
uint64_t
dsl_get_inconsistent(dsl_dataset_t *ds)
{
return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
1 : 0);
}
uint64_t
dsl_get_redacted(dsl_dataset_t *ds)
{
return (dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
}
uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
uint64_t refdbytes = dsl_get_referenced(ds);
uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
availbytes +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
}
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (refdbytes < ds->ds_quota) {
availbytes = MIN(availbytes,
ds->ds_quota - refdbytes);
} else {
availbytes = 0;
}
}
return (availbytes);
}
int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_dataset_t *prev;
int err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err == 0) {
uint64_t comp, uncomp;
err = dsl_dataset_space_written(prev, ds, written,
&comp, &uncomp);
dsl_dataset_rele(prev, FTAG);
}
return (err);
}
/*
* 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
*/
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
dsl_dataset_name(ds->ds_prev, snap);
return (0);
} else {
return (SET_ERROR(ENOENT));
}
}
void
dsl_get_redact_snaps(dsl_dataset_t *ds, nvlist_t *propval)
{
uint64_t nsnaps;
uint64_t *snaps;
if (dsl_dataset_get_uint64_array_feature(ds,
SPA_FEATURE_REDACTED_DATASETS, &nsnaps, &snaps)) {
fnvlist_add_uint64_array(propval, ZPROP_VALUE, snaps,
nsnaps);
}
}
/*
* Returns the mountpoint property and source for the given dataset in the value
* and source buffers. The value buffer must be at least as large as MAXPATHLEN
* and the source buffer at least as large as ZFS_MAX_DATASET_NAME_LEN.
* Returns 0 on success and an error on failure.
*/
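/*
 * Illustrative example (hypothetical names): dataset
 * "pool/home/user" inheriting mountpoint "/export" from
 * "pool/home", on a pool imported with altroot "/mnt", works out
 * below to "/mnt" + "/export" + "/" + "user" = "/mnt/export/user".
 */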
int
dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source)
{
int error;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
/* Retrieve the mountpoint value stored in the zap object */
error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
ZAP_MAXVALUELEN, value, source);
if (error != 0) {
return (error);
}
/*
* Process the dsname and source to find the full mountpoint string.
* Can be skipped for 'legacy' or 'none'.
*/
if (value[0] == '/') {
char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
ASSERT0(strncmp(dsname, source, strlen(source)));
relpath = dsname + strlen(source);
if (relpath[0] == '/')
relpath++;
}
spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
char *mnt = value;
if (value[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
mnt = value + 1;
if (relpath[0] == '\0') {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
root, mnt);
} else {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
root, mnt, relpath[0] == '@' ? "" : "/",
relpath);
}
kmem_free(buf, ZAP_MAXVALUELEN);
}
return (0);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
dsl_get_refratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
dsl_get_logicalreferenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
dsl_get_compressratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
dsl_get_used(ds));
if (ds->ds_is_snapshot) {
get_clones_stat(ds, nv);
} else {
char buf[ZFS_MAX_DATASET_NAME_LEN];
if (dsl_get_prev_snap(ds, buf) == 0)
dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
buf);
dsl_dir_stats(ds->ds_dir, nv);
}
nvlist_t *propval = fnvlist_alloc();
dsl_get_redact_snaps(ds, propval);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
propval);
nvlist_free(propval);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
dsl_get_available(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
dsl_get_referenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
dsl_get_creation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
dsl_get_creationtxg(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
dsl_get_refquota(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
dsl_get_refreservation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
dsl_get_guid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
dsl_get_unique(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
dsl_get_objsetid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
dsl_get_userrefs(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
dsl_get_defer_destroy(ds));
dsl_dataset_crypt_stats(ds, nv);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
uint64_t written;
if (dsl_get_written(ds, &written) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
written);
}
}
if (!dsl_dataset_is_snapshot(ds)) {
/*
* A failed "newfs" (e.g. full) resumable receive leaves
* the stats set on this dataset. Check here for the prop.
*/
get_receive_resume_stats(ds, nv);
/*
* A failed incremental resumable receive leaves the
* stats set on our child named "%recv". Check the child
* for the prop.
*/
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, recvname);
if (strlcat(recvname, "/", sizeof (recvname)) <
sizeof (recvname) &&
strlcat(recvname, recv_clone_name, sizeof (recvname)) <
sizeof (recvname) &&
dsl_dataset_hold(dp, recvname, FTAG, &recv_ds) == 0) {
get_receive_resume_stats(recv_ds, nv);
dsl_dataset_rele(recv_ds, FTAG);
}
}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
stat->dds_creation_txg = dsl_get_creationtxg(ds);
stat->dds_inconsistent = dsl_get_inconsistent(ds);
stat->dds_guid = dsl_get_guid(ds);
stat->dds_redacted = dsl_get_redacted(ds);
stat->dds_origin[0] = '\0';
if (ds->ds_is_snapshot) {
stat->dds_is_snapshot = B_TRUE;
stat->dds_num_clones = dsl_get_numclones(ds);
} else {
stat->dds_is_snapshot = B_FALSE;
stat->dds_num_clones = 0;
if (dsl_dir_is_clone(ds->ds_dir)) {
dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
}
}
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
*refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
*availbytesp +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (*refdbytesp < ds->ds_quota)
*availbytesp = MIN(*availbytesp,
ds->ds_quota - *refdbytesp);
else
*availbytesp = 0;
}
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
*usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
boolean_t
dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
{
dsl_pool_t *dp __maybe_unused = ds->ds_dir->dd_pool;
uint64_t birth;
ASSERT(dsl_pool_config_held(dp));
if (snap == NULL)
return (B_FALSE);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
birth = dsl_dataset_get_blkptr(ds)->blk_birth;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
objset_t *os, *os_snap;
/*
* It may be that only the ZIL differs, because it was
* reset in the head. Don't count that as being
* modified.
*/
if (dmu_objset_from_ds(ds, &os) != 0)
return (B_TRUE);
if (dmu_objset_from_ds(snap, &os_snap) != 0)
return (B_TRUE);
return (bcmp(&os->os_phys->os_meta_dnode,
&os_snap->os_phys->os_meta_dnode,
sizeof (os->os_phys->os_meta_dnode)) != 0);
}
return (B_FALSE);
}
typedef struct dsl_dataset_rename_snapshot_arg {
const char *ddrsa_fsname;
const char *ddrsa_oldsnapname;
const char *ddrsa_newsnapname;
boolean_t ddrsa_recursive;
dmu_tx_t *ddrsa_tx;
} dsl_dataset_rename_snapshot_arg_t;
-/* ARGSUSED */
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
+ (void) dp;
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
int error;
uint64_t val;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
if (error != 0) {
/* ignore nonexistent snapshots */
return (error == ENOENT ? 0 : error);
}
/* new name should not exist */
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
if (error == 0)
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(hds->ds_dir) + 1 +
strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
return (error);
}
static int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
int error;
error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
if (error != 0)
return (error);
if (ddrsa->ddrsa_recursive) {
error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_check_impl, ddrsa,
DS_FIND_CHILDREN);
} else {
error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
}
dsl_dataset_rele(hds, FTAG);
return (error);
}
static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_dataset_t *ds;
uint64_t val;
dmu_tx_t *tx = ddrsa->ddrsa_tx;
int error;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT) {
/* ignore nonexistent snapshots */
return (0);
}
VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));
/* log before we change the name */
spa_history_log_internal_ds(ds, "rename", tx,
"-> @%s", ddrsa->ddrsa_newsnapname);
VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
B_FALSE));
mutex_enter(&ds->ds_lock);
(void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
sizeof (ds->ds_snapname));
mutex_exit(&ds->ds_lock);
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj,
ds->ds_snapname, 8, 1, &ds->ds_object, tx));
zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
ddrsa->ddrsa_newsnapname, B_TRUE);
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
ddrsa->ddrsa_tx = tx;
if (ddrsa->ddrsa_recursive) {
VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_sync_impl, ddrsa,
DS_FIND_CHILDREN));
} else {
VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
}
dsl_dataset_rele(hds, FTAG);
}
int
dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
dsl_dataset_rename_snapshot_arg_t ddrsa;
ddrsa.ddrsa_fsname = fsname;
ddrsa.ddrsa_oldsnapname = oldsnapname;
ddrsa.ddrsa_newsnapname = newsnapname;
ddrsa.ddrsa_recursive = recursive;
return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
dsl_dataset_rename_snapshot_sync, &ddrsa,
1, ZFS_SPACE_CHECK_RESERVED));
}
/*
* If we're doing an ownership handoff, we need to make sure that there is
* only one long hold on the dataset. We're not allowed to change anything here
* so we don't permanently release the long hold or regular hold here. We want
* to do this only when syncing to avoid the dataset unexpectedly going away
* when we release the long hold.
*/
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
boolean_t held = B_FALSE;
if (!dmu_tx_is_syncing(tx))
return (0);
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_activity_lock);
uint64_t holds = zfs_refcount_count(&ds->ds_longholds) -
(owner != NULL ? 1 : 0);
/*
* The value of dd_activity_waiters can change as soon as we drop the
* lock, but we're fine with that; new waiters coming in or old
* waiters leaving doesn't cause problems, since we're going to cancel
* waiters later anyway. The goal of this check is to verify that no
* non-waiters have long-holds, and all new long-holds will be
* prevented because we're holding the pool config as writer.
*/
if (holds != dd->dd_activity_waiters)
held = B_TRUE;
mutex_exit(&dd->dd_activity_lock);
if (held)
return (SET_ERROR(EBUSY));
return (0);
}
int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int64_t unused_refres_delta;
int error;
error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* must not be a snapshot */
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/* must have a most recent snapshot */
if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ESRCH));
}
/*
* No rollback to a snapshot created in the current txg, because
* the rollback may dirty the dataset and create blocks that are
* not reachable from the rootbp while having a birth txg that
* falls into the snapshot's range.
*/
if (dmu_tx_is_syncing(tx) &&
dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EAGAIN));
}
/*
* If the expected target snapshot is specified, then check that
* the latest snapshot is it.
*/
if (ddra->ddra_tosnap != NULL) {
dsl_dataset_t *snapds;
/* Check if the target snapshot exists at all. */
error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
if (error != 0) {
/*
* ESRCH is used to signal that the target snapshot does
* not exist, while ENOENT is used to report that
* the rolled back dataset does not exist.
* ESRCH is also used to cover other cases where the
* target snapshot is not related to the dataset being
* rolled back such as being in a different pool.
*/
if (error == ENOENT || error == EXDEV)
error = SET_ERROR(ESRCH);
dsl_dataset_rele(ds, FTAG);
return (error);
}
ASSERT(snapds->ds_is_snapshot);
/* Check if the snapshot is the latest snapshot indeed. */
if (snapds != ds->ds_prev) {
/*
* Distinguish between the case where the only problem
* is intervening snapshots (EEXIST) vs the snapshot
* not being a valid target for rollback (ESRCH).
*/
if (snapds->ds_dir == ds->ds_dir ||
(dsl_dir_is_clone(ds->ds_dir) &&
dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
snapds->ds_object)) {
error = SET_ERROR(EEXIST);
} else {
error = SET_ERROR(ESRCH);
}
dsl_dataset_rele(snapds, FTAG);
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(snapds, FTAG);
}
/* must not have any bookmarks after the most recent snapshot */
if (dsl_bookmark_latest_txg(ds) >
dsl_dataset_phys(ds)->ds_prev_snap_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* Check if the snap we are rolling back to uses more than
* the refquota.
*/
if (ds->ds_quota != 0 &&
dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* When we do the clone swap, we will temporarily use more space
* due to the refreservation (the head will no longer have any
* unique space, so the entire amount of the refreservation will need
* to be free). We will immediately destroy the clone, freeing
* this space, but the freeing happens over many txg's.
*/
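/*
 * Illustrative example (hypothetical numbers): with a 10G
 * refreservation and 4G of unique space on the head,
 * unused_refres_delta = MIN(10G, 4G) = 4G, so 4G of free space
 * must be available to absorb the temporary extra charge.
 */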
unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
dsl_dataset_phys(ds)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds, *clone;
uint64_t cloneobj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));
dsl_dataset_name(ds->ds_prev, namebuf);
fnvlist_add_string(ddra->ddra_result, "target", namebuf);
cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));
dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
dsl_dataset_zero_zil(ds, tx);
dsl_destroy_head_sync_impl(clone, tx);
dsl_dataset_rele(clone, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* Rolls back the given filesystem or volume to the most recent snapshot.
* The name of the most recent snapshot will be returned under key "target"
* in the result nvlist.
*
* If owner != NULL:
* - The existing dataset MUST be owned by the specified owner at entry
* - Upon return, dataset will still be held by the same owner, whether we
* succeed or not.
*
* This mode is required any time the existing filesystem is mounted. See
* notes above zfs_suspend_fs() for further details.
*/
int
dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result)
{
dsl_dataset_rollback_arg_t ddra;
ddra.ddra_fsname = fsname;
ddra.ddra_tosnap = tosnap;
ddra.ddra_owner = owner;
ddra.ddra_result = result;
return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
dsl_dataset_rollback_sync, &ddra,
1, ZFS_SPACE_CHECK_RESERVED));
}
struct promotenode {
list_node_t link;
dsl_dataset_t *ds;
};
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);
int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds, *origin_head;
int err;
uint64_t unused;
uint64_t ss_mv_cnt;
size_t max_snap_len;
boolean_t conflicting_snaps;
err = promote_hold(ddpa, dp, FTAG);
if (err != 0)
return (err);
hds = ddpa->ddpa_clone;
max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;
if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
promote_rele(ddpa, FTAG);
return (SET_ERROR(EXDEV));
}
snap = list_head(&ddpa->shared_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
origin_head = snap->ds;
origin_ds = snap->ds;
/*
* Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
* When doing a promote we must make sure the encryption root for
* both the target and the target's origin does not change to avoid
* needing to rewrap encryption keys
*/
err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
if (err != 0)
goto out;
/*
* Compute and check the amount of space to transfer. Since this is
* so expensive, don't do the preliminary check.
*/
if (!dmu_tx_is_syncing(tx)) {
promote_rele(ddpa, FTAG);
return (0);
}
/* compute origin's new unique space */
snap = list_tail(&ddpa->clone_snaps);
ASSERT(snap != NULL);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
&ddpa->unique, &unused, &unused);
/*
* Walk the snapshots that we are moving
*
* Compute space to transfer. Consider the incremental changes
* to used by each snapshot:
* (my used) = (prev's used) + (blocks born) - (blocks killed)
* So each snapshot gave birth to:
* (blocks born) = (my used) - (prev's used) + (blocks killed)
* So a sequence would look like:
* (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
* Which simplifies to:
* uN + kN + kN-1 + ... + k1 + k0
* Note however, if we stop before we reach the ORIGIN we get:
* uN + kN + kN-1 + ... + kM - uM-1
*/
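/*
 * For example (hypothetical figures), with three snapshots where
 * (u0, k0) = (10M, 0), (u1, k1) = (12M, 3M) and (u2, k2) = (15M, 5M),
 * the space transferred is u2 + k2 + k1 + k0 = 15M + 5M + 3M + 0 = 23M,
 * matching the sum of the per-snapshot births
 * (15M - 12M + 5M) + (12M - 10M + 3M) + (10M - 0 + 0) = 23M.
 */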
conflicting_snaps = B_FALSE;
ss_mv_cnt = 0;
ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
uint64_t val, dlused, dlcomp, dluncomp;
dsl_dataset_t *ds = snap->ds;
ss_mv_cnt++;
/*
* If there are long holds, we won't be able to evict
* the objset.
*/
if (dsl_dataset_long_held(ds)) {
err = SET_ERROR(EBUSY);
goto out;
}
/* Check that the snapshot name does not conflict */
VERIFY0(dsl_dataset_get_snapname(ds));
if (strlen(ds->ds_snapname) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds,
snap->ds->ds_snapname);
conflicting_snaps = B_TRUE;
} else if (err != ENOENT) {
goto out;
}
/* The very first snapshot does not have a deadlist */
if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
continue;
dsl_deadlist_space(&ds->ds_deadlist,
&dlused, &dlcomp, &dluncomp);
ddpa->used += dlused;
ddpa->comp += dlcomp;
ddpa->uncomp += dluncomp;
}
/*
* Check that bookmarks that are being transferred don't have
* name conflicts.
*/
for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
dsl_dataset_phys(origin_ds)->ds_creation_txg;
dbn = AVL_NEXT(&origin_head->ds_bookmarks, dbn)) {
if (strlen(dbn->dbn_name) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
zfs_bookmark_phys_t bm;
err = dsl_bookmark_lookup_impl(ddpa->ddpa_clone,
dbn->dbn_name, &bm);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds, dbn->dbn_name);
conflicting_snaps = B_TRUE;
} else if (err == ESRCH) {
err = 0;
} else if (err != 0) {
goto out;
}
}
/*
* In order to return the full list of conflicting snapshots, we check
* whether there was a conflict after traversing all of them.
*/
if (conflicting_snaps) {
err = SET_ERROR(EEXIST);
goto out;
}
/*
* If we are a clone of a clone then we never reached ORIGIN,
* so we need to subtract out the clone origin's used space.
*/
if (ddpa->origin_origin) {
ddpa->used -=
dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
ddpa->comp -=
dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
ddpa->uncomp -=
dsl_dataset_phys(ddpa->origin_origin)->
ds_uncompressed_bytes;
}
/* Check that there is enough space and limit headroom here */
err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
0, ss_mv_cnt, ddpa->used, ddpa->cr, ddpa->proc);
if (err != 0)
goto out;
/*
* Compute the amounts of space that will be used by snapshots
* after the promotion (for both origin and clone). For each,
* it is the amount of space that will be on all of their
* deadlists (that was not born before their new origin).
*/
if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
uint64_t space;
/*
* Note, typically this will not be a clone of a clone, so
* dd_origin_txg will be < TXG_INITIAL; these snaplist_space() ->
* dsl_deadlist_space_range() calls will therefore be fast because
* they do not have to iterate over all bps.
*/
snap = list_head(&ddpa->origin_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
err = snaplist_space(&ddpa->shared_snaps,
snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
if (err != 0)
goto out;
err = snaplist_space(&ddpa->clone_snaps,
snap->ds->ds_dir->dd_origin_txg, &space);
if (err != 0)
goto out;
ddpa->cloneusedsnap += space;
}
if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
DD_FLAG_USED_BREAKDOWN) {
err = snaplist_space(&ddpa->origin_snaps,
dsl_dataset_phys(origin_ds)->ds_creation_txg,
&ddpa->originusedsnap);
if (err != 0)
goto out;
}
out:
promote_rele(ddpa, FTAG);
return (err);
}
void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds;
dsl_dataset_t *origin_head;
dsl_dir_t *dd;
dsl_dir_t *odd = NULL;
uint64_t oldnext_obj;
int64_t delta;
ASSERT(nvlist_empty(ddpa->err_ds));
VERIFY0(promote_hold(ddpa, dp, FTAG));
hds = ddpa->ddpa_clone;
ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);
snap = list_head(&ddpa->shared_snaps);
origin_ds = snap->ds;
dd = hds->ds_dir;
snap = list_head(&ddpa->origin_snaps);
origin_head = snap->ds;
/*
* We need to explicitly open odd, since origin_ds's dd will be
* changing.
*/
VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
NULL, FTAG, &odd));
dsl_dataset_promote_crypt_sync(hds->ds_dir, odd, tx);
/* change origin's next snap */
dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
snap = list_tail(&ddpa->clone_snaps);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;
/* change the origin's next clone */
if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
dsl_dataset_remove_from_next_clones(origin_ds,
snap->ds->ds_object, tx);
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
oldnext_obj, tx));
}
/* change origin */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
dmu_buf_will_dirty(odd->dd_dbuf, tx);
dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
origin_head->ds_dir->dd_origin_txg =
dsl_dataset_phys(origin_ds)->ds_creation_txg;
/* change dd_clone entries */
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
hds->ds_object, tx));
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
origin_head->ds_object, tx));
if (dsl_dir_phys(dd)->dd_clones == 0) {
dsl_dir_phys(dd)->dd_clones =
zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
}
/*
* Move bookmarks to this dir.
*/
dsl_bookmark_node_t *dbn_next;
for (dsl_bookmark_node_t *dbn = avl_first(&origin_head->ds_bookmarks);
dbn != NULL && dbn->dbn_phys.zbm_creation_txg <=
dsl_dataset_phys(origin_ds)->ds_creation_txg;
dbn = dbn_next) {
dbn_next = AVL_NEXT(&origin_head->ds_bookmarks, dbn);
avl_remove(&origin_head->ds_bookmarks, dbn);
VERIFY0(zap_remove(dp->dp_meta_objset,
origin_head->ds_bookmarks_obj, dbn->dbn_name, tx));
dsl_bookmark_node_add(hds, dbn, tx);
}
dsl_bookmark_next_changed(hds, origin_ds, tx);
/* move snapshots to this dir */
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
dsl_dataset_t *ds = snap->ds;
/*
* Property callbacks are registered to a particular
* dsl_dir. Since ours is changing, evict the objset
* so that they will be unregistered from the old dsl_dir.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* move snap name entry */
VERIFY0(dsl_dataset_get_snapname(ds));
VERIFY0(dsl_dataset_snap_remove(origin_head,
ds->ds_snapname, tx, B_TRUE));
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
8, 1, &ds->ds_object, tx));
dsl_fs_ss_count_adjust(hds->ds_dir, 1,
DD_FIELD_SNAPSHOT_COUNT, tx);
/* change containing dsl_dir */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
ASSERT3P(ds->ds_dir, ==, odd);
dsl_dir_rele(ds->ds_dir, ds);
VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
NULL, ds, &ds->ds_dir));
/* move any clone references */
if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *cnds;
uint64_t o;
if (za.za_first_integer == oldnext_obj) {
/*
* We've already moved the
* origin's reference.
*/
continue;
}
VERIFY0(dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &cnds));
o = dsl_dir_phys(cnds->ds_dir)->
dd_head_dataset_obj;
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, o, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, o, tx));
dsl_dataset_rele(cnds, FTAG);
}
zap_cursor_fini(&zc);
}
ASSERT(!dsl_prop_hascb(ds));
}
/*
* Change space accounting.
* Note, ddpa->*usedsnap and dd_used_breakdown[SNAP] will either
* both be valid, or both be 0 (resulting in delta == 0). This
* is true for each of {clone,origin} independently.
*/
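/*
 * Net effect: the clone's dsl_dir gains ddpa->used bytes in total
 * (split between DD_USED_SNAP and DD_USED_HEAD by "delta"), and the
 * origin's dsl_dir loses the same total via the mirrored negative
 * adjustments below.
 */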
delta = ddpa->cloneusedsnap -
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, >=, 0);
ASSERT3U(ddpa->used, >=, delta);
dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(dd, DD_USED_HEAD,
ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);
delta = ddpa->originusedsnap -
dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, <=, 0);
ASSERT3U(ddpa->used, >=, -delta);
dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(odd, DD_USED_HEAD,
-ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);
dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;
/*
* Since livelists are specific to a clone's origin txg, they
* are no longer accurate. Destroy the livelist from the clone being
* promoted. If the origin dataset is a clone, destroy its livelist
* as well.
*/
dsl_dir_remove_livelist(dd, tx, B_TRUE);
dsl_dir_remove_livelist(odd, tx, B_TRUE);
/* log history record */
spa_history_log_internal_ds(hds, "promote", tx, " ");
dsl_dir_rele(odd, FTAG);
promote_rele(ddpa, FTAG);
}
/*
* Make a list of dsl_dataset_t's for the snapshots between first_obj
* (exclusive) and last_obj (inclusive). The list will be in reverse
* order (last_obj will be the list_head()). If first_obj == 0, do all
* snapshots back to this dataset's origin.
*/
static int
snaplist_make(dsl_pool_t *dp,
uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
{
uint64_t obj = last_obj;
list_create(l, sizeof (struct promotenode),
offsetof(struct promotenode, link));
while (obj != first_obj) {
dsl_dataset_t *ds;
struct promotenode *snap;
int err;
err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
ASSERT(err != ENOENT);
if (err != 0)
return (err);
if (first_obj == 0)
first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;
snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
snap->ds = ds;
list_insert_tail(l, snap);
obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
}
return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
struct promotenode *snap;
*spacep = 0;
for (snap = list_head(l); snap; snap = list_next(l, snap)) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
mintxg, UINT64_MAX, &used, &comp, &uncomp);
*spacep += used;
}
return (0);
}
static void
snaplist_destroy(list_t *l, void *tag)
{
struct promotenode *snap;
if (l == NULL || !list_link_active(&l->list_head))
return;
while ((snap = list_tail(l)) != NULL) {
list_remove(l, snap);
dsl_dataset_rele(snap->ds, tag);
kmem_free(snap, sizeof (*snap));
}
list_destroy(l);
}
static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
{
int error;
dsl_dir_t *dd;
struct promotenode *snap;
error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
&ddpa->ddpa_clone);
if (error != 0)
return (error);
dd = ddpa->ddpa_clone->ds_dir;
if (ddpa->ddpa_clone->ds_is_snapshot ||
!dsl_dir_is_clone(dd)) {
dsl_dataset_rele(ddpa->ddpa_clone, tag);
return (SET_ERROR(EINVAL));
}
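/*
 * Build the three snapshot lists used by promote:
 * shared_snaps - the branch-point snapshot and its predecessors in the
 * origin's dsl_dir (these move to the promoted clone),
 * clone_snaps - the clone head and its own snapshots back to the
 * branch point,
 * origin_snaps - the origin head and its snapshots newer than the
 * branch point.
 */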
error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
&ddpa->shared_snaps, tag);
if (error != 0)
goto out;
error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
&ddpa->clone_snaps, tag);
if (error != 0)
goto out;
snap = list_head(&ddpa->shared_snaps);
ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
&ddpa->origin_snaps, tag);
if (error != 0)
goto out;
if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
tag, &ddpa->origin_origin);
if (error != 0)
goto out;
}
out:
if (error != 0)
promote_rele(ddpa, tag);
return (error);
}
static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
{
snaplist_destroy(&ddpa->shared_snaps, tag);
snaplist_destroy(&ddpa->clone_snaps, tag);
snaplist_destroy(&ddpa->origin_snaps, tag);
if (ddpa->origin_origin != NULL)
dsl_dataset_rele(ddpa->origin_origin, tag);
dsl_dataset_rele(ddpa->ddpa_clone, tag);
}
/*
* Promote a clone.
*
* If it fails due to a conflicting snapshot name, "conflsnap" will be filled
* in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
*/
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
dsl_dataset_promote_arg_t ddpa = { 0 };
uint64_t numsnaps;
int error;
nvpair_t *snap_pair;
objset_t *os;
/*
* We will modify space proportional to the number of
* snapshots. Compute numsnaps.
*/
error = dmu_objset_hold(name, FTAG, &os);
if (error != 0)
return (error);
error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
&numsnaps);
dmu_objset_rele(os, FTAG);
if (error != 0)
return (error);
ddpa.ddpa_clonename = name;
ddpa.err_ds = fnvlist_alloc();
ddpa.cr = CRED();
ddpa.proc = curproc;
error = dsl_sync_task(name, dsl_dataset_promote_check,
dsl_dataset_promote_sync, &ddpa,
2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);
/*
* Return the first conflicting snapshot found.
*/
snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
if (snap_pair != NULL && conflsnap != NULL)
(void) strlcpy(conflsnap, nvpair_name(snap_pair),
ZFS_MAX_DATASET_NAME_LEN);
fnvlist_free(ddpa.err_ds);
return (error);
}
int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
/*
* "slack" factor for received datasets with refquota set on them.
* See the bottom of this function for details on its use.
*/
uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
spa_asize_inflation;
int64_t unused_refres_delta;
/* they should both be heads */
if (clone->ds_is_snapshot ||
origin_head->ds_is_snapshot)
return (SET_ERROR(EINVAL));
/* if we are not forcing, the branch point should be just before them */
if (!force && clone->ds_prev != origin_head->ds_prev)
return (SET_ERROR(EINVAL));
/* clone should be the clone (unless they are unrelated) */
if (clone->ds_prev != NULL &&
clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
origin_head->ds_dir != clone->ds_prev->ds_dir)
return (SET_ERROR(EINVAL));
/* the clone should be a child of the origin */
if (clone->ds_dir->dd_parent != origin_head->ds_dir)
return (SET_ERROR(EINVAL));
/* origin_head shouldn't be modified unless 'force' */
if (!force &&
dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
return (SET_ERROR(ETXTBSY));
/* origin_head should have no long holds (e.g. is not mounted) */
if (dsl_dataset_handoff_check(origin_head, owner, tx))
return (SET_ERROR(EBUSY));
/* check amount of any unconsumed refreservation */
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* The clone can't be too much over the head's refquota.
*
* To ensure that the entire refquota can be used, we allow one
* transaction to exceed the refquota. Therefore, this check
* needs to also allow for the space referenced to be more than the
* refquota. The maximum amount of space that one transaction can use
* on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
* overage ensures that we are able to receive a filesystem that
* exceeds the refquota on the source system.
*
* So that overage is the refquota_slack we use below.
*/
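/*
 * E.g. (hypothetical figures): with a 10G refquota and a slack of S
 * bytes, a clone referencing up to 10G + S still passes, so a stream
 * that consumed its entire refquota on the source can be received.
 */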
if (origin_head->ds_quota != 0 &&
dsl_dataset_phys(clone)->ds_referenced_bytes >
origin_head->ds_quota + refquota_slack)
return (SET_ERROR(EDQUOT));
return (0);
}
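/*
 * Exchange the remap deadlists (if any) between a clone and its origin
 * head during a clone swap: each dataset's remap deadlist object is
 * detached and then reattached to the other dataset.
 */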
static void
dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
dsl_dataset_t *origin, dmu_tx_t *tx)
{
uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
dsl_pool_t *dp = dmu_tx_pool(tx);
ASSERT(dsl_pool_sync_context(dp));
clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);
if (clone_remap_dl_obj != 0) {
dsl_deadlist_close(&clone->ds_remap_deadlist);
dsl_dataset_unset_remap_deadlist_object(clone, tx);
}
if (origin_remap_dl_obj != 0) {
dsl_deadlist_close(&origin->ds_remap_deadlist);
dsl_dataset_unset_remap_deadlist_object(origin, tx);
}
if (clone_remap_dl_obj != 0) {
dsl_dataset_set_remap_deadlist_object(origin,
clone_remap_dl_obj, tx);
dsl_deadlist_open(&origin->ds_remap_deadlist,
dp->dp_meta_objset, clone_remap_dl_obj);
}
if (origin_remap_dl_obj != 0) {
dsl_dataset_set_remap_deadlist_object(clone,
origin_remap_dl_obj, tx);
dsl_deadlist_open(&clone->ds_remap_deadlist,
dp->dp_meta_objset, origin_remap_dl_obj);
}
}
void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta;
ASSERT(clone->ds_reserved == 0);
/*
* NOTE: On DEBUG kernels there could be a race between this and
* the check function if spa_asize_inflation is adjusted...
*/
ASSERT(origin_head->ds_quota == 0 ||
dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
DMU_MAX_ACCESS * spa_asize_inflation);
ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
dsl_dir_cancel_waiters(origin_head->ds_dir);
/*
* Swap per-dataset feature flags.
*/
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
ASSERT(!dsl_dataset_feature_is_active(clone, f));
ASSERT(!dsl_dataset_feature_is_active(origin_head, f));
continue;
}
boolean_t clone_inuse = dsl_dataset_feature_is_active(clone, f);
void *clone_feature = clone->ds_feature[f];
boolean_t origin_head_inuse =
dsl_dataset_feature_is_active(origin_head, f);
void *origin_head_feature = origin_head->ds_feature[f];
if (clone_inuse)
dsl_dataset_deactivate_feature_impl(clone, f, tx);
if (origin_head_inuse)
dsl_dataset_deactivate_feature_impl(origin_head, f, tx);
if (clone_inuse) {
dsl_dataset_activate_feature(origin_head->ds_object, f,
clone_feature, tx);
origin_head->ds_feature[f] = clone_feature;
}
if (origin_head_inuse) {
dsl_dataset_activate_feature(clone->ds_object, f,
origin_head_feature, tx);
clone->ds_feature[f] = origin_head_feature;
}
}
dmu_buf_will_dirty(clone->ds_dbuf, tx);
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
if (clone->ds_objset != NULL) {
dmu_objset_evict(clone->ds_objset);
clone->ds_objset = NULL;
}
if (origin_head->ds_objset != NULL) {
dmu_objset_evict(origin_head->ds_objset);
origin_head->ds_objset = NULL;
}
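/*
 * Recompute the change in unconsumed refreservation (see the
 * corresponding check function); it is charged to DD_USED_REFRSRV
 * further below.
 */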
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
/*
* Reset origin's unique bytes.
*/
{
dsl_dataset_t *origin = clone->ds_prev;
uint64_t comp, uncomp;
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_deadlist_space_range(&clone->ds_deadlist,
dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
&dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
}
/* swap blkptrs */
{
rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
blkptr_t tmp;
tmp = dsl_dataset_phys(origin_head)->ds_bp;
dsl_dataset_phys(origin_head)->ds_bp =
dsl_dataset_phys(clone)->ds_bp;
dsl_dataset_phys(clone)->ds_bp = tmp;
rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
rrw_exit(&clone->ds_bp_rwlock, FTAG);
}
/* set dd_*_bytes */
{
int64_t dused, dcomp, duncomp;
uint64_t cdl_used, cdl_comp, cdl_uncomp;
uint64_t odl_used, odl_comp, odl_uncomp;
ASSERT3U(dsl_dir_phys(clone->ds_dir)->
dd_used_breakdown[DD_USED_SNAP], ==, 0);
dsl_deadlist_space(&clone->ds_deadlist,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space(&origin_head->ds_deadlist,
&odl_used, &odl_comp, &odl_uncomp);
dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
cdl_used -
(dsl_dataset_phys(origin_head)->ds_referenced_bytes +
odl_used);
dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
cdl_comp -
(dsl_dataset_phys(origin_head)->ds_compressed_bytes +
odl_comp);
duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
cdl_uncomp -
(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
odl_uncomp);
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
dused, dcomp, duncomp, tx);
dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
-dused, -dcomp, -duncomp, tx);
/*
* The change in snapshot space usage comes entirely from the
* head's deadlist, since that is the only thing changing here
* that affects snapused.
*/
dsl_deadlist_space_range(&clone->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space_range(&origin_head->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&odl_used, &odl_comp, &odl_uncomp);
dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
/* swap ds_*_bytes */
SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
dsl_dataset_phys(clone)->ds_referenced_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
dsl_dataset_phys(clone)->ds_compressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
dsl_dataset_phys(clone)->ds_uncompressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
dsl_dataset_phys(clone)->ds_unique_bytes);
/* apply any parent delta for change in unconsumed refreservation */
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
unused_refres_delta, 0, 0, tx);
/*
* Swap deadlists.
*/
dsl_deadlist_close(&clone->ds_deadlist);
dsl_deadlist_close(&origin_head->ds_deadlist);
SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(origin_head)->ds_deadlist_obj);
dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);
/*
* If there is a bookmark at the origin, its "next dataset" is
* changing, so we need to reset its FBN.
*/
dsl_bookmark_next_changed(origin_head, origin_head->ds_prev, tx);
dsl_scan_ds_clone_swapped(origin_head, clone, tx);
/*
* Destroy any livelists associated with the clone or the origin,
* since after the swap the corresponding livelists are no longer
* valid.
*/
dsl_dir_remove_livelist(clone->ds_dir, tx, B_TRUE);
dsl_dir_remove_livelist(origin_head->ds_dir, tx, B_TRUE);
spa_history_log_internal_ds(clone, "clone swap", tx,
"parent=%s", origin_head->ds_dir->dd_myname);
}
/*
* Given a pool name and a dataset object number in that pool,
* return the name of that dataset.
*/
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int error;
error = dsl_pool_hold(pname, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
if (error == 0) {
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
dsl_pool_rele(dp, FTAG);
return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
int error = 0;
ASSERT3S(asize, >, 0);
/*
* *ref_rsrv is the portion of asize that will come from any
* unconsumed refreservation space.
*/
*ref_rsrv = 0;
mutex_enter(&ds->ds_lock);
/*
* Make a space adjustment for reserved bytes.
*/
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
ASSERT3U(*used, >=,
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*used -=
(ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*ref_rsrv =
asize - MIN(asize, parent_delta(ds, asize + inflight));
}
if (!check_quota || ds->ds_quota == 0) {
mutex_exit(&ds->ds_lock);
return (0);
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes (which
* may free up space for us).
*/
if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
ds->ds_quota) {
if (inflight > 0 ||
dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
error = SET_ERROR(ERESTART);
else
error = SET_ERROR(EDQUOT);
}
mutex_exit(&ds->ds_lock);
return (error);
}
typedef struct dsl_dataset_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;
-/* ARGSUSED */
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
newval < ds->ds_reserved) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));
if (ds->ds_quota != newval) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_quota = newval;
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t refquota)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refquota;
return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
dsl_dataset_set_refquota_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval, unique;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_lock);
if (!DS_UNIQUE_IS_ACCURATE(ds))
dsl_dataset_recalc_head_uniq(ds);
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
mutex_exit(&ds->ds_lock);
if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
uint64_t delta = MAX(unique, newval) -
MAX(unique, ds->ds_reserved);
if (delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
(ds->ds_quota > 0 && newval > ds->ds_quota)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
uint64_t newval;
uint64_t unique;
int64_t delta;
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
source, sizeof (value), 1, &value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
delta = MAX(0, (int64_t)(newval - unique)) -
MAX(0, (int64_t)(ds->ds_reserved - unique));
ds->ds_reserved = newval;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
mutex_exit(&ds->ds_dir->dd_lock);
}
static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_dataset_set_refreservation_sync_impl(ds,
ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t refreservation)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refreservation;
return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
dsl_dataset_set_refreservation_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
typedef struct dsl_dataset_set_compression_arg {
const char *ddsca_name;
zprop_source_t ddsca_source;
uint64_t ddsca_value;
} dsl_dataset_set_compression_arg_t;
-/* ARGSUSED */
static int
dsl_dataset_set_compression_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_compression_arg_t *ddsca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
spa_feature_t f = zio_compress_to_feature(compval);
if (f == SPA_FEATURE_NONE)
return (SET_ERROR(EINVAL));
if (!spa_feature_is_enabled(dp->dp_spa, f))
return (SET_ERROR(ENOTSUP));
return (0);
}
static void
dsl_dataset_set_compression_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_compression_arg_t *ddsca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t compval = ZIO_COMPRESS_ALGO(ddsca->ddsca_value);
spa_feature_t f = zio_compress_to_feature(compval);
ASSERT3S(spa_feature_table[f].fi_type, ==, ZFEATURE_TYPE_BOOLEAN);
VERIFY0(dsl_dataset_hold(dp, ddsca->ddsca_name, FTAG, &ds));
if (zfeature_active(f, ds->ds_feature[f]) != B_TRUE) {
ds->ds_feature_activation[f] = (void *)B_TRUE;
dsl_dataset_activate_feature(ds->ds_object, f,
ds->ds_feature_activation[f], tx);
ds->ds_feature[f] = ds->ds_feature_activation[f];
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_compression(const char *dsname, zprop_source_t source,
uint64_t compression)
{
dsl_dataset_set_compression_arg_t ddsca;
/*
* The sync task is only required for zstd in order to activate
* the feature flag when the property is first set.
*/
if (ZIO_COMPRESS_ALGO(compression) != ZIO_COMPRESS_ZSTD)
return (0);
ddsca.ddsca_name = dsname;
ddsca.ddsca_source = source;
ddsca.ddsca_value = compression;
return (dsl_sync_task(dsname, dsl_dataset_set_compression_check,
dsl_dataset_set_compression_sync, &ddsca, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
/*
* Return (in *usedp) the amount of space referenced by "new" that was not
* referenced at the time the bookmark corresponds to. "New" may be a
* snapshot or a head. The bookmark must be before new, in
* new's filesystem (or its origin) -- caller verifies this.
*
* The written space is calculated from two components: First, ignoring
* any freed space, we take new's used space minus old's used space.
* Next, we add back the space that was freed between the two time
* points, since that freeing reduced new's used space relative to
* old's. Specifically, this is the space that was born before
* zbm_creation_txg and freed before new (ie. on new's deadlist or a
* previous deadlist).
*
*    space freed                [---------------------]
*    snapshots     ---O-------O--------O-------O------
*                          bookmark            new
*
* Note, the bookmark's zbm_*_bytes_refd must be valid, but if the HAS_FBN
* flag is not set, we will calculate the freed_before_next based on the
* next snapshot's deadlist, rather than using zbm_*_freed_before_next_snap.
*/
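/*
 * Worked example (hypothetical figures): if new references 30G, the
 * bookmark's zbm_referenced_bytes_refd is 20G, and 4G that was born
 * before zbm_creation_txg was freed before new, then
 * written = (30G - 20G) + 4G = 14G.
 */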
static int
dsl_dataset_space_written_impl(zfs_bookmark_phys_t *bmp,
dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
dsl_pool_t *dp = new->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_is_snapshot(new)) {
ASSERT3U(bmp->zbm_creation_txg, <,
dsl_dataset_phys(new)->ds_creation_txg);
}
*usedp = 0;
*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
*usedp -= bmp->zbm_referenced_bytes_refd;
*compp = 0;
*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
*compp -= bmp->zbm_compressed_bytes_refd;
*uncompp = 0;
*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
*uncompp -= bmp->zbm_uncompressed_bytes_refd;
dsl_dataset_t *snap = new;
while (dsl_dataset_phys(snap)->ds_prev_snap_txg >
bmp->zbm_creation_txg) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds_deadlist,
0, bmp->zbm_creation_txg,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
uint64_t snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
if (snap != new)
dsl_dataset_rele(snap, FTAG);
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
if (err != 0)
break;
}
/*
* We might not have the FBN if we are calculating written from
* a snapshot (because we didn't know the correct "next" snapshot
* until now).
*/
if (bmp->zbm_flags & ZBM_FLAG_HAS_FBN) {
*usedp += bmp->zbm_referenced_freed_before_next_snap;
*compp += bmp->zbm_compressed_freed_before_next_snap;
*uncompp += bmp->zbm_uncompressed_freed_before_next_snap;
} else {
ASSERT3U(dsl_dataset_phys(snap)->ds_prev_snap_txg, ==,
bmp->zbm_creation_txg);
uint64_t used, comp, uncomp;
dsl_deadlist_space(&snap->ds_deadlist, &used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
}
if (snap != new)
dsl_dataset_rele(snap, FTAG);
return (err);
}
/*
* Return (in *usedp) the amount of space written in new that was not
* present at the time the bookmark corresponds to. New may be a
* snapshot or the head. The bookmark must be before new, in
* new's filesystem (or its origin) -- caller verifies this.
*/
int
dsl_dataset_space_written_bookmark(zfs_bookmark_phys_t *bmp,
dsl_dataset_t *new, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
if (!(bmp->zbm_flags & ZBM_FLAG_HAS_FBN))
return (SET_ERROR(ENOTSUP));
return (dsl_dataset_space_written_impl(bmp, new,
usedp, compp, uncompp));
}
/*
* Return (in *usedp) the amount of space written in new that is not
* present in oldsnap. New may be a snapshot or the head. Old must be
* a snapshot before new, in new's filesystem (or its origin). If not then
* fail and return EINVAL.
*/
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
if (!dsl_dataset_is_before(new, oldsnap, 0))
return (SET_ERROR(EINVAL));
zfs_bookmark_phys_t zbm = { 0 };
dsl_dataset_phys_t *dsp = dsl_dataset_phys(oldsnap);
zbm.zbm_guid = dsp->ds_guid;
zbm.zbm_creation_txg = dsp->ds_creation_txg;
zbm.zbm_creation_time = dsp->ds_creation_time;
zbm.zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
zbm.zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
zbm.zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
/*
* If oldsnap is the origin (or origin's origin, ...) of new,
* we can't easily calculate the effective FBN. Therefore,
* we do not set ZBM_FLAG_HAS_FBN, so that the _impl will calculate
* it relative to the correct "next": the next snapshot towards "new",
* rather than the next snapshot in oldsnap's dsl_dir.
*/
return (dsl_dataset_space_written_impl(&zbm, new,
usedp, compp, uncompp));
}
/*
* Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
* lastsnap, and all snapshots in between are deleted.
*
*    blocks that would be freed [---------------------------]
*    snapshots     ---O-------O--------O-------O--------O
*                       firstsnap          lastsnap
*
* This is the set of blocks that were born after the snap before firstsnap,
* (birth > firstsnap->prev_snap_txg) and died before the snap after the
* last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
* We calculate this by iterating over the relevant deadlists (from the snap
* after lastsnap, backward to the snap after firstsnap), summing up the
* space on the deadlist that was born after the snap before firstsnap.
*/
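/*
 * Illustrative example (hypothetical figures): if the deadlists from
 * the snap after lastsnap back to the snap after firstsnap hold 2G, 1G
 * and 3G of blocks born after the snap before firstsnap, then deleting
 * firstsnap..lastsnap would reclaim 2G + 1G + 3G = 6G.
 */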
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
dsl_dataset_t *lastsnap,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
uint64_t snapobj;
dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
ASSERT(firstsnap->ds_is_snapshot);
ASSERT(lastsnap->ds_is_snapshot);
/*
* Check that the snapshots are in the same dsl_dir, and firstsnap
* is before lastsnap.
*/
if (firstsnap->ds_dir != lastsnap->ds_dir ||
dsl_dataset_phys(firstsnap)->ds_creation_txg >
dsl_dataset_phys(lastsnap)->ds_creation_txg)
return (SET_ERROR(EINVAL));
*usedp = *compp = *uncompp = 0;
snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
while (snapobj != firstsnap->ds_object) {
dsl_dataset_t *ds;
uint64_t used, comp, uncomp;
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
if (err != 0)
break;
dsl_deadlist_space_range(&ds->ds_deadlist,
dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
ASSERT3U(snapobj, !=, 0);
dsl_dataset_rele(ds, FTAG);
}
return (err);
}
/*
* Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
* For example, they could both be snapshots of the same filesystem, and
* 'earlier' is before 'later'. Or 'earlier' could be the origin of
* 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
* filesystem. Or 'earlier' could be the origin's origin.
*
* If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
*/
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg)
{
dsl_pool_t *dp = later->ds_dir->dd_pool;
int error;
boolean_t ret;
ASSERT(dsl_pool_config_held(dp));
ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
if (earlier_txg == 0)
earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;
if (later->ds_is_snapshot &&
earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
return (B_FALSE);
if (later->ds_dir == earlier->ds_dir)
return (B_TRUE);
/*
* We check dd_origin_obj explicitly here rather than using
* dsl_dir_is_clone() so that we will return TRUE if "earlier"
* is $ORIGIN@$ORIGIN. dsl_dataset_space_written() depends on
* this behavior.
*/
if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == 0)
return (B_FALSE);
dsl_dataset_t *origin;
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
if (error != 0)
return (B_FALSE);
if (dsl_dataset_phys(origin)->ds_creation_txg == earlier_txg &&
origin->ds_dir == earlier->ds_dir) {
dsl_dataset_rele(origin, FTAG);
return (B_TRUE);
}
ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
dsl_dataset_rele(origin, FTAG);
return (ret);
}
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}
boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
dmu_object_info_t doi;
dmu_object_info_from_db(ds->ds_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
return (dsl_dataset_is_zapified(ds) &&
zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}
uint64_t
dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
{
uint64_t remap_deadlist_obj;
int err;
if (!dsl_dataset_is_zapified(ds))
return (0);
err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
&remap_deadlist_obj);
if (err != 0) {
VERIFY3S(err, ==, ENOENT);
return (0);
}
ASSERT(remap_deadlist_obj != 0);
return (remap_deadlist_obj);
}
boolean_t
dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
{
EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
dsl_dataset_get_remap_deadlist_object(ds) != 0);
return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
}
static void
dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
ASSERT(obj != 0);
dsl_dataset_zapify(ds, tx);
VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
}
static void
dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
{
VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
}
void
dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t remap_deadlist_object;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dataset_remap_deadlist_exists(ds));
remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
dsl_deadlist_close(&ds->ds_remap_deadlist);
dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
dsl_dataset_unset_remap_deadlist_object(ds, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t remap_deadlist_obj;
spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
/*
* Currently we only create remap deadlists when there are indirect
* vdevs with referenced mappings.
*/
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
remap_deadlist_obj = dsl_deadlist_clone(
&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_dataset_set_remap_deadlist_object(ds,
remap_deadlist_obj, tx);
dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
remap_deadlist_obj);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_dataset_activate_redaction(dsl_dataset_t *ds, uint64_t *redact_snaps,
uint64_t num_redact_snaps, dmu_tx_t *tx)
{
uint64_t dsobj = ds->ds_object;
struct feature_type_uint64_array_arg *ftuaa =
kmem_zalloc(sizeof (*ftuaa), KM_SLEEP);
ftuaa->length = (int64_t)num_redact_snaps;
if (num_redact_snaps > 0) {
ftuaa->array = kmem_alloc(num_redact_snaps * sizeof (uint64_t),
KM_SLEEP);
bcopy(redact_snaps, ftuaa->array, num_redact_snaps *
sizeof (uint64_t));
}
dsl_dataset_activate_feature(dsobj, SPA_FEATURE_REDACTED_DATASETS,
ftuaa, tx);
ds->ds_feature[SPA_FEATURE_REDACTED_DATASETS] = ftuaa;
}
/* BEGIN CSTYLED */
#if defined(_LP64)
#define RECORDSIZE_PERM ZMOD_RW
#else
/* Limited to 1M on 32-bit platforms due to lack of virtual address space */
#define RECORDSIZE_PERM ZMOD_RD
#endif
ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, INT, RECORDSIZE_PERM,
"Max allowed record size");
ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW,
"Allow mounting of redacted datasets");
/* END CSTYLED */
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_flags);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_rele_flags);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);
diff --git a/sys/contrib/openzfs/module/zfs/dsl_destroy.c b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
index a2748197f29d..b32929b3320c 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_destroy.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_destroy.c
@@ -1,1281 +1,1281 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2013 by Joyent, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>
#include <sys/zcp.h>
#include <sys/dsl_deadlist.h>
#include <sys/zthr.h>
#include <sys/spa_impl.h>
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
if (!ds->ds_is_snapshot)
return (SET_ERROR(EINVAL));
if (dsl_dataset_long_held(ds))
return (SET_ERROR(EBUSY));
/*
* Only allow deferred destroy on pools that support it.
* NOTE: deferred destroy is only supported on snapshots.
*/
if (defer) {
if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
SPA_VERSION_USERREFS)
return (SET_ERROR(ENOTSUP));
return (0);
}
/*
* If this snapshot has an elevated user reference count,
* we can't destroy it yet.
*/
if (ds->ds_userrefs > 0)
return (SET_ERROR(EBUSY));
/*
* Can't delete a branch point.
*/
if (dsl_dataset_phys(ds)->ds_num_children > 1)
return (SET_ERROR(EEXIST));
return (0);
}
int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_destroy_snapshot_arg_t *ddsa = arg;
const char *dsname = ddsa->ddsa_name;
boolean_t defer = ddsa->ddsa_defer;
dsl_pool_t *dp = dmu_tx_pool(tx);
int error = 0;
dsl_dataset_t *ds;
error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
/*
* If the snapshot does not exist, silently ignore it, and
* dsl_destroy_snapshot_sync() will be a no-op
* (it's "already destroyed").
*/
if (error == ENOENT)
return (0);
if (error == 0) {
error = dsl_destroy_snapshot_check_impl(ds, defer);
dsl_dataset_rele(ds, FTAG);
}
return (error);
}
struct process_old_arg {
dsl_dataset_t *ds;
dsl_dataset_t *ds_prev;
boolean_t after_branch_point;
zio_t *pio;
uint64_t used, comp, uncomp;
};
static int
process_old_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
struct process_old_arg *poa = arg;
dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
ASSERT(!BP_IS_HOLE(bp));
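/*
 * Blocks on ds_next's old-format deadlist that were born at or before
 * the destroyed snapshot's previous snapshot are still needed by older
 * snapshots, so keep them on a deadlist (crediting ds_prev's unique
 * space where appropriate). Blocks born after that point were unique
 * to the snapshot being destroyed and can be freed right away.
 */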
if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, bp_freed, tx);
if (poa->ds_prev && !poa->after_branch_point &&
bp->blk_birth >
dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
bp_get_dsize_sync(dp->dp_spa, bp);
}
} else {
poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
poa->comp += BP_GET_PSIZE(bp);
poa->uncomp += BP_GET_UCSIZE(bp);
dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
}
return (0);
}
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
struct process_old_arg poa = { 0 };
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
uint64_t deadlist_obj;
ASSERT(ds->ds_deadlist.dl_oldfmt);
ASSERT(ds_next->ds_deadlist.dl_oldfmt);
poa.ds = ds;
poa.ds_prev = ds_prev;
poa.after_branch_point = after_branch_point;
poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
process_old_cb, &poa, tx));
VERIFY0(zio_wait(poa.pio));
ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);
/* change snapused */
dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
-poa.used, -poa.comp, -poa.uncomp, tx);
/* swap next's deadlist to our deadlist */
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_close(&ds_next->ds_deadlist);
deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
dsl_dataset_phys(ds)->ds_deadlist_obj =
dsl_dataset_phys(ds_next)->ds_deadlist_obj;
dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
dsl_deadlist_open(&ds->ds_deadlist, mos,
dsl_dataset_phys(ds)->ds_deadlist_obj);
dsl_deadlist_open(&ds_next->ds_deadlist, mos,
dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
typedef struct remaining_clones_key {
dsl_dataset_t *rck_clone;
list_node_t rck_node;
} remaining_clones_key_t;
static remaining_clones_key_t *
rck_alloc(dsl_dataset_t *clone)
{
remaining_clones_key_t *rck = kmem_alloc(sizeof (*rck), KM_SLEEP);
rck->rck_clone = clone;
return (rck);
}
static void
dsl_dir_remove_clones_key_impl(dsl_dir_t *dd, uint64_t mintxg, dmu_tx_t *tx,
list_t *stack, void *tag)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
/*
* If it is the old version, dd_clones doesn't exist so we can't
* find the clones, but dsl_deadlist_remove_key() is a no-op so it
* doesn't matter.
*/
if (dsl_dir_phys(dd)->dd_clones == 0)
return;
zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
za->za_first_integer, tag, &clone));
if (clone->ds_dir->dd_origin_txg > mintxg) {
dsl_deadlist_remove_key(&clone->ds_deadlist,
mintxg, tx);
if (dsl_dataset_remap_deadlist_exists(clone)) {
dsl_deadlist_remove_key(
&clone->ds_remap_deadlist, mintxg, tx);
}
list_insert_head(stack, rck_alloc(clone));
} else {
dsl_dataset_rele(clone, tag);
}
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
}
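/*
 * Walk the clone tree iteratively using an explicit stack instead of
 * recursing, so that deeply nested chains of clones cannot exhaust the
 * kernel stack.
 */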
void
dsl_dir_remove_clones_key(dsl_dir_t *top_dd, uint64_t mintxg, dmu_tx_t *tx)
{
list_t stack;
list_create(&stack, sizeof (remaining_clones_key_t),
offsetof(remaining_clones_key_t, rck_node));
dsl_dir_remove_clones_key_impl(top_dd, mintxg, tx, &stack, FTAG);
for (remaining_clones_key_t *rck = list_remove_head(&stack);
rck != NULL; rck = list_remove_head(&stack)) {
dsl_dataset_t *clone = rck->rck_clone;
dsl_dir_t *clone_dir = clone->ds_dir;
kmem_free(rck, sizeof (*rck));
dsl_dir_remove_clones_key_impl(clone_dir, mintxg, tx,
&stack, FTAG);
dsl_dataset_rele(clone, FTAG);
}
list_destroy(&stack);
}
static void
dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
/* Move blocks to be obsoleted to pool's obsolete list. */
if (dsl_dataset_remap_deadlist_exists(ds_next)) {
if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
dsl_pool_create_obsolete_bpobj(dp, tx);
dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
&dp->dp_obsolete_bpobj,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
}
/* Merge our deadlist into next's and free it. */
if (dsl_dataset_remap_deadlist_exists(ds)) {
uint64_t remap_deadlist_object =
dsl_dataset_get_remap_deadlist_object(ds);
ASSERT(remap_deadlist_object != 0);
mutex_enter(&ds_next->ds_remap_deadlist_lock);
if (!dsl_dataset_remap_deadlist_exists(ds_next))
dsl_dataset_create_remap_deadlist(ds_next, tx);
mutex_exit(&ds_next->ds_remap_deadlist_lock);
dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
remap_deadlist_object, tx);
dsl_dataset_destroy_remap_deadlist(ds, tx);
}
}
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
int after_branch_point = FALSE;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
dsl_dataset_t *ds_prev = NULL;
uint64_t obj;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));
if (defer &&
(ds->ds_userrefs > 0 ||
dsl_dataset_phys(ds)->ds_num_children > 1)) {
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
spa_history_log_internal_ds(ds, "defer_destroy", tx, " ");
return;
}
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
/* We need to log before removing it from the namespace. */
spa_history_log_internal_ds(ds, "destroy", tx, " ");
dsl_scan_ds_destroyed(ds, tx);
obj = ds->ds_object;
boolean_t book_exists = dsl_bookmark_ds_destroyed(ds, tx);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
dsl_dataset_deactivate_feature(ds, f, tx);
}
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
ASSERT3P(ds->ds_prev, ==, NULL);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
after_branch_point =
(dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);
dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
if (after_branch_point &&
dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
VERIFY0(zap_add_int(mos,
dsl_dataset_phys(ds_prev)->
ds_next_clones_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
tx));
}
}
if (!after_branch_point) {
dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
dsl_dataset_phys(ds)->ds_next_snap_obj;
}
}
dsl_dataset_t *ds_next;
uint64_t old_unique;
uint64_t used = 0, comp = 0, uncomp = 0;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);
old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;
dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
dsl_dataset_phys(ds)->ds_prev_snap_txg;
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);
if (ds_next->ds_deadlist.dl_oldfmt) {
process_old_deadlist(ds, ds_prev, ds_next,
after_branch_point, tx);
} else {
/* Adjust prev's unique space. */
if (ds_prev && !after_branch_point) {
dsl_deadlist_space_range(&ds_next->ds_deadlist,
dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg,
&used, &comp, &uncomp);
dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
}
/* Adjust snapused. */
dsl_deadlist_space_range(&ds_next->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
&used, &comp, &uncomp);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
-used, -comp, -uncomp, tx);
/* Move blocks to be freed to pool's free list. */
dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
&dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
tx);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
DD_USED_HEAD, used, comp, uncomp, tx);
/* Merge our deadlist into next's and free it. */
dsl_deadlist_merge(&ds_next->ds_deadlist,
dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
/*
* We are done with the deadlist tree (generated/used
* by dsl_deadlist_move_bpobj() and dsl_deadlist_merge()).
* Discard it to save memory.
*/
dsl_deadlist_discard_tree(&ds_next->ds_deadlist);
}
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);
if (!book_exists) {
/* Collapse range in clone heads */
dsl_dir_remove_clones_key(ds->ds_dir,
dsl_dataset_phys(ds)->ds_creation_txg, tx);
}
if (ds_next->ds_is_snapshot) {
dsl_dataset_t *ds_nextnext;
/*
* Update next's unique to include blocks which
* were previously shared by only this snapshot
* and it. Those blocks will be born after the
* prev snap and before this snap, and will have
* died after the next snap and before the one
* after that (i.e. be on the snap after next's
* deadlist).
*/
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds_next)->ds_next_snap_obj,
FTAG, &ds_nextnext));
dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg,
dsl_dataset_phys(ds)->ds_creation_txg,
&used, &comp, &uncomp);
dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
dsl_dataset_rele(ds_nextnext, FTAG);
ASSERT3P(ds_next->ds_prev, ==, NULL);
/* Collapse range in this head. */
dsl_dataset_t *hds;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
FTAG, &hds));
if (!book_exists) {
/* Collapse range in this head. */
dsl_deadlist_remove_key(&hds->ds_deadlist,
dsl_dataset_phys(ds)->ds_creation_txg, tx);
}
if (dsl_dataset_remap_deadlist_exists(hds)) {
dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
dsl_dataset_phys(ds)->ds_creation_txg, tx);
}
dsl_dataset_rele(hds, FTAG);
} else {
ASSERT3P(ds_next->ds_prev, ==, ds);
dsl_dataset_rele(ds_next->ds_prev, ds_next);
ds_next->ds_prev = NULL;
if (ds_prev) {
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds_next, &ds_next->ds_prev));
}
dsl_dataset_recalc_head_uniq(ds_next);
/*
* Reduce the amount of our unconsumed refreservation
* being charged to our parent by the amount of
* new unique data we have gained.
*/
if (old_unique < ds_next->ds_reserved) {
int64_t mrsdelta;
uint64_t new_unique =
dsl_dataset_phys(ds_next)->ds_unique_bytes;
ASSERT(old_unique <= new_unique);
mrsdelta = MIN(new_unique - old_unique,
ds_next->ds_reserved - old_unique);
dsl_dir_diduse_space(ds->ds_dir,
DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
}
}
dsl_dataset_rele(ds_next, FTAG);
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* remove from snapshot namespace */
dsl_dataset_t *ds_head;
ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
{
uint64_t val;
int err;
err = dsl_dataset_snap_lookup(ds_head,
ds->ds_snapname, &val);
ASSERT0(err);
ASSERT3U(val, ==, obj);
}
#endif
VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
dsl_dataset_rele(ds_head, FTAG);
if (ds_prev != NULL)
dsl_dataset_rele(ds_prev, FTAG);
spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count __maybe_unused;
ASSERT0(zap_count(mos,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
count == 0);
VERIFY0(dmu_object_free(mos,
dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
}
if (dsl_dataset_phys(ds)->ds_props_obj != 0)
VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
tx));
if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
tx));
dsl_dir_rele(ds->ds_dir, ds);
ds->ds_dir = NULL;
dmu_object_free_zapified(mos, obj, tx);
}
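/*
 * Sync-task callback for destroying one snapshot by name. A snapshot
 * that has already disappeared (ENOENT) is treated as success; any
 * other hold failure is unexpected in syncing context.
 */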
void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_destroy_snapshot_arg_t *ddsa = arg;
const char *dsname = ddsa->ddsa_name;
boolean_t defer = ddsa->ddsa_defer;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == ENOENT)
return;
ASSERT0(error);
dsl_destroy_snapshot_sync_impl(ds, defer, tx);
zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
dsl_dataset_rele(ds, FTAG);
}
/*
* The semantics of this function are described in the comment above
* lzc_destroy_snaps(). To summarize:
*
* The snapshots must all be in the same pool.
*
* Snapshots that don't exist will be silently ignored (considered to be
* "already deleted").
*
* On success, all snaps will be destroyed and this will return 0.
* On failure, no snaps will be destroyed, the errlist will be filled in,
* and this will return an errno.
*/
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
nvlist_t *errlist)
{
if (nvlist_next_nvpair(snaps, NULL) == NULL)
return (0);
/*
* lzc_destroy_snaps() is documented to take an nvlist whose
* values "don't matter". We need to convert that nvlist to
* one that we know can be converted to LUA.
*/
nvlist_t *snaps_normalized = fnvlist_alloc();
for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
fnvlist_add_boolean_value(snaps_normalized,
nvpair_name(pair), B_TRUE);
}
nvlist_t *arg = fnvlist_alloc();
fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
fnvlist_free(snaps_normalized);
fnvlist_add_boolean_value(arg, "defer", defer);
nvlist_t *wrapper = fnvlist_alloc();
fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
fnvlist_free(arg);
const char *program =
"arg = ...\n"
"snaps = arg['snaps']\n"
"defer = arg['defer']\n"
"errors = { }\n"
"has_errors = false\n"
"for snap, v in pairs(snaps) do\n"
" errno = zfs.check.destroy{snap, defer=defer}\n"
" zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
" if errno == ENOENT then\n"
" snaps[snap] = nil\n"
" elseif errno ~= 0 then\n"
" errors[snap] = errno\n"
" has_errors = true\n"
" end\n"
"end\n"
"if has_errors then\n"
" return errors\n"
"end\n"
"for snap, v in pairs(snaps) do\n"
" errno = zfs.sync.destroy{snap, defer=defer}\n"
" assert(errno == 0)\n"
"end\n"
"return { }\n";
nvlist_t *result = fnvlist_alloc();
int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
program,
B_TRUE,
0,
zfs_lua_max_memlimit,
fnvlist_lookup_nvpair(wrapper, ZCP_ARG_ARGLIST), result);
if (error != 0) {
char *errorstr = NULL;
(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
if (errorstr != NULL) {
zfs_dbgmsg("%s", errorstr);
}
fnvlist_free(wrapper);
fnvlist_free(result);
return (error);
}
fnvlist_free(wrapper);
/*
* lzc_destroy_snaps() is documented to fill the errlist with
* int32 values, so we need to convert the int64 values that are
* returned from LUA.
*/
int rv = 0;
nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
int32_t val = (int32_t)fnvpair_value_int64(pair);
if (rv == 0)
rv = val;
fnvlist_add_int32(errlist, nvpair_name(pair), val);
}
fnvlist_free(result);
return (rv);
}
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
int error;
nvlist_t *nvl = fnvlist_alloc();
nvlist_t *errlist = fnvlist_alloc();
fnvlist_add_boolean(nvl, name);
error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
fnvlist_free(errlist);
fnvlist_free(nvl);
return (error);
}
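/*
 * killarg carries the dataset being destroyed and the open transaction
 * down through traverse_dataset() into kill_blkptr(), which frees each
 * visited block pointer on the legacy synchronous destroy path.
 */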
struct killarg {
dsl_dataset_t *ds;
dmu_tx_t *tx;
};
-/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) spa, (void) dnp;
struct killarg *ka = arg;
dmu_tx_t *tx = ka->tx;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (zb->zb_level == ZB_ZIL_LEVEL) {
ASSERT(zilog != NULL);
/*
* It's a block in the intent log. It has no
* accounting, so just free it.
*/
dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
} else {
ASSERT(zilog == NULL);
ASSERT3U(bp->blk_birth, >,
dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
}
return (0);
}
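/*
 * Legacy destroy path, used when the async_destroy feature is not
 * enabled: post-order traverse every block born after the previous
 * snapshot (without decrypting) and free it in this txg.
 */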
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
struct killarg ka;
spa_history_log_internal_ds(ds, "destroy", tx,
"(synchronous, mintxg=%llu)",
(long long)dsl_dataset_phys(ds)->ds_prev_snap_txg);
/*
* Free everything that we point to (that's born after
* the previous snapshot, if we are a clone)
*
* NB: this should be very quick, because we already
* freed all the objects in open context.
*/
ka.ds = ds;
ka.tx = tx;
VERIFY0(traverse_dataset(ds,
dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
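/*
 * Verify that a head (non-snapshot) dataset may be destroyed: only the
 * expected long holds, no snapshots of its own, no child filesystems,
 * and no long holds on the origin snapshot if that origin must be
 * destroyed along with this clone.
 */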
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
int error;
uint64_t count;
objset_t *mos;
ASSERT(!ds->ds_is_snapshot);
if (ds->ds_is_snapshot)
return (SET_ERROR(EINVAL));
if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
return (SET_ERROR(EBUSY));
ASSERT0(ds->ds_dir->dd_activity_waiters);
mos = ds->ds_dir->dd_pool->dp_meta_objset;
/*
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
* from.)
*/
if (ds->ds_prev != NULL &&
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
return (SET_ERROR(EBUSY));
/*
* Can't delete if there are children of this fs.
*/
error = zap_count(mos,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
if (error != 0)
return (error);
if (count != 0)
return (SET_ERROR(EEXIST));
if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0) {
/* We need to remove the origin snapshot as well. */
if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
dsl_destroy_head_arg_t *ddha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(ds, 0);
dsl_dataset_rele(ds, FTAG);
return (error);
}
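/*
 * Tear down the on-disk dsl_dir once its head dataset is gone: drop the
 * filesystem count on ancestors, clear the reservation, destroy the
 * crypto key and the child/props/clones/delegation ZAPs, unlink the dir
 * from its parent, and free its object.
 */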
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
dsl_dir_t *dd;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dd_used_t t;
ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));
VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
/* Decrement the filesystem count for all parent filesystems. */
if (dd->dd_parent != NULL)
dsl_fs_ss_count_adjust(dd->dd_parent, -1,
DD_FIELD_FILESYSTEM_COUNT, tx);
/*
* Remove our reservation. The impl() routine avoids setting the
* actual property, which would require the (already destroyed) ds.
*/
dsl_dir_set_reservation_sync_impl(dd, 0, tx);
ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dd)->dd_reserved);
for (t = 0; t < DD_USED_NUM; t++)
ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);
if (dd->dd_crypto_obj != 0) {
dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
(void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
}
VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
if (dsl_dir_phys(dd)->dd_clones != 0)
VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_clones, tx));
VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
VERIFY0(zap_remove(mos,
dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
dd->dd_myname, tx));
dsl_dir_rele(dd, FTAG);
dmu_object_free_zapified(mos, ddobj, tx);
}
static void
dsl_clone_destroy_assert(dsl_dir_t *dd)
{
uint64_t used, comp, uncomp;
ASSERT(dsl_dir_is_clone(dd));
dsl_deadlist_space(&dd->dd_livelist, &used, &comp, &uncomp);
ASSERT3U(dsl_dir_phys(dd)->dd_used_bytes, ==, used);
ASSERT3U(dsl_dir_phys(dd)->dd_compressed_bytes, ==, comp);
/*
* Greater than because we do not track embedded block pointers in
* the livelist
*/
ASSERT3U(dsl_dir_phys(dd)->dd_uncompressed_bytes, >=, uncomp);
ASSERT(list_is_empty(&dd->dd_pending_allocs.bpl_list));
ASSERT(list_is_empty(&dd->dd_pending_frees.bpl_list));
}
/*
* Start the delete process for a clone. Free its zil, verify the space usage
* and queue the blkptrs for deletion by adding the livelist to the pool-wide
* delete queue.
*/
static void
dsl_async_clone_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t zap_obj, to_delete, used, comp, uncomp;
objset_t *os;
dsl_dir_t *dd = ds->ds_dir;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
VERIFY0(dmu_objset_from_ds(ds, &os));
uint64_t mintxg = 0;
dsl_deadlist_entry_t *dle = dsl_deadlist_first(&dd->dd_livelist);
if (dle != NULL)
mintxg = dle->dle_mintxg;
spa_history_log_internal_ds(ds, "destroy", tx,
"(livelist, mintxg=%llu)", (long long)mintxg);
/* Check that the clone is in a correct state to be deleted */
dsl_clone_destroy_assert(dd);
/* Destroy the zil */
zil_destroy_sync(dmu_objset_zil(os), tx);
VERIFY0(zap_lookup(mos, dd->dd_object,
DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &to_delete));
/* Initialize deleted_clones entry to track livelists to cleanup */
int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (error == ENOENT) {
zap_obj = zap_create(mos, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1,
&(zap_obj), tx));
spa->spa_livelists_to_delete = zap_obj;
} else if (error != 0) {
zfs_panic_recover("zfs: error %d was returned while looking "
"up DMU_POOL_DELETED_CLONES in the zap", error);
return;
}
VERIFY0(zap_add_int(mos, zap_obj, to_delete, tx));
/* Clone is no longer using space, now tracked by dp_free_dir */
dsl_deadlist_space(&dd->dd_livelist, &used, &comp, &uncomp);
dsl_dir_diduse_space(dd, DD_USED_HEAD,
-used, -comp, -dsl_dir_phys(dd)->dd_uncompressed_bytes,
tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
used, comp, uncomp, tx);
dsl_dir_remove_livelist(dd, tx, B_FALSE);
zthr_wakeup(spa->spa_livelist_delete_zthr);
}
/*
* Move the bptree into the pool's list of trees to clean up, update space
* accounting information and destroy the zil.
*/
static void
dsl_async_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t used, comp, uncomp;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
spa_history_log_internal_ds(ds, "destroy", tx,
"(bptree, mintxg=%llu)",
(long long)dsl_dataset_phys(ds)->ds_prev_snap_txg);
zil_destroy_sync(dmu_objset_zil(os), tx);
if (!spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dsl_scan_t *scn = dp->dp_scan;
spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
tx);
dp->dp_bptree_obj = bptree_alloc(mos, tx);
VERIFY0(zap_add(mos,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
&dp->dp_bptree_obj, tx));
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = B_TRUE;
}
used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;
ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
dsl_dataset_phys(ds)->ds_unique_bytes == used);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
bptree_add(mos, dp->dp_bptree_obj,
&dsl_dataset_phys(ds)->ds_bp,
dsl_dataset_phys(ds)->ds_prev_snap_txg,
used, comp, uncomp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
-used, -comp, -uncomp, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
used, comp, uncomp, tx);
}
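/*
 * Syncing-context work of destroying a head dataset. Block freeing is
 * dispatched to one of three strategies: livelist-based clone destroy,
 * bptree-based async destroy, or the legacy synchronous traversal; the
 * dataset, its dsl_dir, and (if needed) the deferred origin snapshot
 * are then dismantled.
 */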
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
uint64_t obj, ddobj, prevobj = 0;
boolean_t rmorigin;
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
ASSERT(ds->ds_prev == NULL ||
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
dsl_dir_cancel_waiters(ds->ds_dir);
rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
DS_IS_DEFER_DESTROY(ds->ds_prev) &&
dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0);
/* Remove our reservation. */
if (ds->ds_reserved != 0) {
dsl_dataset_set_refreservation_sync_impl(ds,
(ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
0, tx);
ASSERT0(ds->ds_reserved);
}
obj = ds->ds_object;
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (dsl_dataset_feature_is_active(ds, f))
dsl_dataset_deactivate_feature(ds, f, tx);
}
dsl_scan_ds_destroyed(ds, tx);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
/* This is a clone */
ASSERT(ds->ds_prev != NULL);
ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
obj);
ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds->ds_prev,
obj, tx);
}
ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
}
/*
* Destroy the deadlist. Unless it's a clone, the
* deadlist should be empty since the dataset has no snapshots.
* (If it's a clone, it's safe to ignore the deadlist contents
* since they are still referenced by the origin snapshot.)
*/
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
if (dsl_dataset_remap_deadlist_exists(ds))
dsl_dataset_destroy_remap_deadlist(ds, tx);
/*
* Each destroy is responsible for both destroying (enqueuing
* to be destroyed) the blkptrs comprising the dataset as well as
* those belonging to the zil.
*/
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
dsl_async_clone_destroy(ds, tx);
} else if (spa_feature_is_enabled(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dsl_async_dataset_destroy(ds, tx);
} else {
old_synchronous_dataset_destroy(ds, tx);
}
if (ds->ds_prev != NULL) {
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
VERIFY0(zap_remove_int(mos,
dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
ds->ds_object, tx));
}
prevobj = ds->ds_prev->ds_object;
dsl_dataset_rele(ds->ds_prev, ds);
ds->ds_prev = NULL;
}
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* Erase the link in the dir */
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
ddobj = ds->ds_dir->dd_object;
ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
VERIFY0(zap_destroy(mos,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));
if (ds->ds_bookmarks_obj != 0) {
void *cookie = NULL;
dsl_bookmark_node_t *dbn;
while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) !=
NULL) {
if (dbn->dbn_phys.zbm_redaction_obj != 0) {
VERIFY0(dmu_object_free(mos,
dbn->dbn_phys.zbm_redaction_obj, tx));
spa_feature_decr(dmu_objset_spa(mos),
SPA_FEATURE_REDACTION_BOOKMARKS, tx);
}
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
spa_feature_decr(dmu_objset_spa(mos),
SPA_FEATURE_BOOKMARK_WRITTEN, tx);
}
spa_strfree(dbn->dbn_name);
mutex_destroy(&dbn->dbn_lock);
kmem_free(dbn, sizeof (*dbn));
}
avl_destroy(&ds->ds_bookmarks);
VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
}
spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
dsl_dir_rele(ds->ds_dir, ds);
ds->ds_dir = NULL;
dmu_object_free_zapified(mos, obj, tx);
dsl_dir_destroy_sync(ddobj, tx);
if (rmorigin) {
dsl_dataset_t *prev;
VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
dsl_dataset_rele(prev, FTAG);
}
}
void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
dsl_destroy_head_arg_t *ddha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
dsl_destroy_head_sync_impl(ds, tx);
zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
dsl_dataset_rele(ds, FTAG);
}
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
dsl_destroy_head_arg_t *ddha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
/* Mark it as inconsistent on-disk, in case we crash */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
spa_history_log_internal_ds(ds, "destroy begin", tx, " ");
dsl_dataset_rele(ds, FTAG);
}
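/*
 * Open-context entry point. On pools without the async_destroy feature
 * the destroy is split in two: one sync task marks the dataset
 * inconsistent, the objects are then freed from open context to keep
 * the final txg short, and a second sync task finishes the destroy.
 * With the feature enabled a single sync task does all the work.
 */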
int
dsl_destroy_head(const char *name)
{
dsl_destroy_head_arg_t ddha;
int error;
spa_t *spa;
boolean_t isenabled;
#ifdef _KERNEL
zfs_destroy_unmount_origin(name);
#endif
error = spa_open(name, &spa, FTAG);
if (error != 0)
return (error);
isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
spa_close(spa, FTAG);
ddha.ddha_name = name;
if (!isenabled) {
objset_t *os;
error = dsl_sync_task(name, dsl_destroy_head_check,
dsl_destroy_head_begin_sync, &ddha,
0, ZFS_SPACE_CHECK_DESTROY);
if (error != 0)
return (error);
/*
* Head deletion is processed in one txg on old pools;
* remove the objects from open context so that the txg sync
* is not too long. This optimization can only work for
* encrypted datasets if the wrapping key is loaded.
*/
error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_TRUE,
FTAG, &os);
if (error == 0) {
uint64_t prev_snap_txg =
dsl_dataset_phys(dmu_objset_ds(os))->
ds_prev_snap_txg;
for (uint64_t obj = 0; error == 0;
error = dmu_object_next(os, &obj, FALSE,
prev_snap_txg))
(void) dmu_free_long_object(os, obj);
/* sync out all frees */
txg_wait_synced(dmu_objset_pool(os), 0);
dmu_objset_disown(os, B_TRUE, FTAG);
}
}
return (dsl_sync_task(name, dsl_destroy_head_check,
dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
}
/*
* Note, this function is used as the callback for dmu_objset_find(). We
* always return 0 so that we will continue to find and process
* inconsistent datasets, even if we encounter an error trying to
* process one of them.
*/
-/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
+ (void) arg;
objset_t *os;
if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));
/*
* If the dataset is inconsistent because a resumable receive
* has failed, then do not destroy it.
*/
if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
need_destroy = B_FALSE;
dmu_objset_rele(os, FTAG);
if (need_destroy)
(void) dsl_destroy_head(dsname);
}
return (0);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dsl_dir.c b/sys/contrib/openzfs/module/zfs/dsl_dir.c
index 84caace4dbab..aca32ff9bbb9 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_dir.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_dir.c
@@ -1,2450 +1,2458 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2014 Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <sys/zthr.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
/*
* Filesystem and Snapshot Limits
* ------------------------------
*
* These limits are used to restrict the number of filesystems and/or snapshots
* that can be created at a given level in the tree or below. A typical
* use-case is with a delegated dataset where the administrator wants to ensure
* that a user within the zone is not creating too many additional filesystems
* or snapshots, even though they're not exceeding their space quota.
*
* The filesystem and snapshot counts are stored as extensible properties. This
* capability is controlled by a feature flag and must be enabled to be used.
* Once enabled, the feature is not active until the first limit is set. At
* that point, future operations to create/destroy filesystems or snapshots
* will validate and update the counts.
*
* Because the count properties will not exist before the feature is active,
* the counts are updated when a limit is first set on an uninitialized
* dsl_dir node in the tree (The filesystem/snapshot count on a node includes
* all of the nested filesystems/snapshots. Thus, a new leaf node has a
* filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
* snapshot count properties on a node indicate uninitialized counts on that
* node.) When first setting a limit on an uninitialized node, the code starts
* at the filesystem with the new limit and descends into all sub-filesystems
* to add the count properties.
*
* In practice this is lightweight since a limit is typically set when the
* filesystem is created and thus has no children. Once valid, changing the
* limit value won't require a re-traversal since the counts are already valid.
* When recursively fixing the counts, if a node with a limit is encountered
* during the descent, the counts are known to be valid and there is no need to
* descend into that filesystem's children. The counts on filesystems above the
* one with the new limit will still be uninitialized, unless a limit is
* eventually set on one of those filesystems. The counts are always recursively
* updated when a limit is set on a dataset, unless there is already a limit.
* When a new limit value is set on a filesystem with an existing limit, it is
* possible for the new limit to be less than the current count at that level
* since a user who can change the limit is also allowed to exceed the limit.
*
* Once the feature is active, then whenever a filesystem or snapshot is
* created, the code recurses up the tree, validating the new count against the
* limit at each initialized level. In practice, most levels will not have a
* limit set. If there is a limit at any initialized level up the tree, the
* check must pass or the creation will fail. Likewise, when a filesystem or
* snapshot is destroyed, the counts are recursively adjusted all the way up
* the initialized nodes in the tree. Renaming a filesystem to a different point
* in the tree will first validate, then update the counts on each branch up to
* the common ancestor. A receive will also validate the counts and then update
* them.
*
* An exception to the above behavior is that the limit is not enforced if the
* user has permission to modify the limit. This is primarily so that
* recursive snapshots in the global zone always work. We want to prevent a
* denial-of-service in which a lower level delegated dataset could max out its
* limit and thus block recursive snapshots from being taken in the global zone.
* Because of this, it is possible for the snapshot count to be over the limit
* and snapshots taken in the global zone could cause a lower level dataset to
* hit or exceed its limit. The administrator taking the global zone recursive
* snapshot should be aware of this side-effect and behave accordingly.
* For consistency, the filesystem limit is also not enforced if the user can
* modify the limit.
*
* The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
* and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in
* dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
* dsl_dir_init_fs_ss_count().
*/
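/*
 * For example: with filesystem_limit=3 and filesystem_count=3 on pool/a,
 * creating pool/a/new fails with EDQUOT for a user who cannot change the
 * limit, while a user who can change the limit is allowed to exceed it.
 */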
-extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd);
-
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
typedef struct ddulrt_arg {
dsl_dir_t *ddulrta_dd;
uint64_t ddlrta_txg;
} ddulrt_arg_t;
static void
dsl_dir_evict_async(void *dbu)
{
dsl_dir_t *dd = dbu;
int t;
dsl_pool_t *dp __maybe_unused = dd->dd_pool;
dd->dd_dbuf = NULL;
for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
ASSERT(dd->dd_tempreserved[t] == 0);
ASSERT(dd->dd_space_towrite[t] == 0);
}
if (dd->dd_parent)
dsl_dir_async_rele(dd->dd_parent, dd);
spa_async_close(dd->dd_pool->dp_spa, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
}
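/*
 * Look up, or instantiate on first use, the in-core dsl_dir_t for
 * @ddobj. The first opener builds the structure (parent hold, name,
 * crypto/origin/livelist state) and attaches it to the bonus buffer;
 * a racing opener frees its copy and adopts the winner.
 */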
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, void *tag, dsl_dir_t **ddp)
{
dmu_buf_t *dbuf;
dsl_dir_t *dd;
dmu_object_info_t doi;
int err;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
if (err != 0)
return (err);
dd = dmu_buf_get_user(dbuf);
dmu_object_info_from_db(dbuf, &doi);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
if (dd == NULL) {
dsl_dir_t *winner;
dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
dd->dd_object = ddobj;
dd->dd_dbuf = dbuf;
dd->dd_pool = dp;
mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dd->dd_activity_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dd->dd_activity_cv, NULL, CV_DEFAULT, NULL);
dsl_prop_init(dd);
if (dsl_dir_is_zapified(dd)) {
err = zap_lookup(dp->dp_meta_objset,
ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
sizeof (uint64_t), 1, &dd->dd_crypto_obj);
if (err == 0) {
/* check for on-disk format errata */
if (dsl_dir_incompatible_encryption_version(
dd)) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
}
} else if (err != ENOENT) {
goto errout;
}
}
dsl_dir_snap_cmtime_update(dd);
if (dsl_dir_phys(dd)->dd_parent_obj) {
err = dsl_dir_hold_obj(dp,
dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
&dd->dd_parent);
if (err != 0)
goto errout;
if (tail) {
#ifdef ZFS_DEBUG
uint64_t foundobj;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj, tail,
sizeof (foundobj), 1, &foundobj);
ASSERT(err || foundobj == ddobj);
#endif
(void) strlcpy(dd->dd_myname, tail,
sizeof (dd->dd_myname));
} else {
err = zap_value_search(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj,
ddobj, 0, dd->dd_myname);
}
if (err != 0)
goto errout;
} else {
(void) strlcpy(dd->dd_myname, spa_name(dp->dp_spa),
sizeof (dd->dd_myname));
}
if (dsl_dir_is_clone(dd)) {
dmu_buf_t *origin_bonus;
dsl_dataset_phys_t *origin_phys;
/*
* We can't open the origin dataset, because
* that would require opening this dsl_dir.
* Just look at its phys directly instead.
*/
err = dmu_bonus_hold(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_origin_obj, FTAG,
&origin_bonus);
if (err != 0)
goto errout;
origin_phys = origin_bonus->db_data;
dd->dd_origin_txg =
origin_phys->ds_creation_txg;
dmu_buf_rele(origin_bonus, FTAG);
if (dsl_dir_is_zapified(dd)) {
uint64_t obj;
err = zap_lookup(dp->dp_meta_objset,
dd->dd_object, DD_FIELD_LIVELIST,
sizeof (uint64_t), 1, &obj);
if (err == 0)
dsl_dir_livelist_open(dd, obj);
else if (err != ENOENT)
goto errout;
}
}
dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
&dd->dd_dbuf);
winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
if (winner != NULL) {
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dd = winner;
} else {
spa_open_ref(dp->dp_spa, dd);
}
}
/*
* The dsl_dir_t has both open-to-close and instantiate-to-evict
* holds on the spa. We need the open-to-close holds because
* otherwise the spa_refcnt wouldn't change when we open a
* dir which the spa also has open, so we could incorrectly
* think it was OK to unload/export/destroy the pool. We need
* the instantiate-to-evict hold because the dsl_dir_t has a
* pointer to the dd_pool, which has a pointer to the spa_t.
*/
spa_open_ref(dp->dp_spa, tag);
ASSERT3P(dd->dd_pool, ==, dp);
ASSERT3U(dd->dd_object, ==, ddobj);
ASSERT3P(dd->dd_dbuf, ==, dbuf);
*ddp = dd;
return (0);
errout:
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
if (dsl_deadlist_is_open(&dd->dd_livelist))
dsl_dir_livelist_close(dd);
dsl_prop_fini(dd);
cv_destroy(&dd->dd_activity_cv);
mutex_destroy(&dd->dd_activity_lock);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
void
dsl_dir_rele(dsl_dir_t *dd, void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/*
* Remove a reference to the given dsl dir that is being asynchronously
* released. Async releases occur from a taskq performing eviction of
* dsl datasets and dirs. This process is identical to a normal release
* with the exception of using the async API for releasing the reference on
* the spa.
*/
void
dsl_dir_async_rele(dsl_dir_t *dd, void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_async_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
if (dd->dd_parent) {
dsl_dir_name(dd->dd_parent, buf);
VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
} else {
buf[0] = '\0';
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/*
* recursive mutex so that we can use
* dprintf_dd() with dd_lock held
*/
mutex_enter(&dd->dd_lock);
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&dd->dd_lock);
} else {
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
int result = 0;
if (dd->dd_parent) {
/* parent's name + 1 for the "/" */
result = dsl_dir_namelen(dd->dd_parent) + 1;
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/* see dsl_dir_name */
mutex_enter(&dd->dd_lock);
result += strlen(dd->dd_myname);
mutex_exit(&dd->dd_lock);
} else {
result += strlen(dd->dd_myname);
}
return (result);
}
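/*
 * Copy the next '/'- or '@'-delimited component of @path into
 * @component (a buffer of at least ZFS_MAX_DATASET_NAME_LEN bytes) and
 * point *nextp at the remainder, or set it to NULL once the path has
 * been consumed.
 */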
static int
getcomponent(const char *path, char *component, const char **nextp)
{
char *p;
if ((path == NULL) || (path[0] == '\0'))
return (SET_ERROR(ENOENT));
/* This would be a good place to reserve some namespace... */
p = strpbrk(path, "/@");
if (p && (p[1] == '/' || p[1] == '@')) {
/* two separators in a row */
return (SET_ERROR(EINVAL));
}
if (p == NULL || p == path) {
/*
* if the first thing is an @ or /, it had better be an
* @ and it had better not have any more ats or slashes,
* and it had better have something after the @.
*/
if (p != NULL &&
(p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
return (SET_ERROR(EINVAL));
if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(component, path, ZFS_MAX_DATASET_NAME_LEN);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
p++;
} else if (p[0] == '@') {
/*
* if the next separator is an @, there had better not be
* any more slashes.
*/
if (strchr(path, '/'))
return (SET_ERROR(EINVAL));
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
} else {
panic("invalid p=%p", (void *)p);
}
*nextp = p;
return (0);
}
/*
* Return the dsl_dir_t, and possibly the last component which couldn't
* be found in *tail. The name must be in the specified dsl_pool_t. This
* thread must hold the dp_config_rwlock for the pool. Returns an error if
* the path is bogus, or if tail==NULL and we couldn't parse the whole name.
* (*tail)[0] == '@' means that the last component is a snapshot.
*/
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
dsl_dir_t **ddp, const char **tailp)
{
char *buf;
const char *spaname, *next, *nextnext = NULL;
int err;
dsl_dir_t *dd;
uint64_t ddobj;
buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
err = getcomponent(name, buf, &next);
if (err != 0)
goto error;
/* Make sure the name is in the specified pool. */
spaname = spa_name(dp->dp_spa);
if (strcmp(buf, spaname) != 0) {
err = SET_ERROR(EXDEV);
goto error;
}
ASSERT(dsl_pool_config_held(dp));
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
if (err != 0) {
goto error;
}
while (next != NULL) {
dsl_dir_t *child_dd;
err = getcomponent(next, buf, &nextnext);
if (err != 0)
break;
ASSERT(next[0] != '\0');
if (next[0] == '@')
break;
dprintf("looking up %s in obj%lld\n",
buf, (longlong_t)dsl_dir_phys(dd)->dd_child_dir_zapobj);
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj,
buf, sizeof (ddobj), 1, &ddobj);
if (err != 0) {
if (err == ENOENT)
err = 0;
break;
}
err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
if (err != 0)
break;
dsl_dir_rele(dd, tag);
dd = child_dd;
next = nextnext;
}
if (err != 0) {
dsl_dir_rele(dd, tag);
goto error;
}
/*
* It's an error if there's more than one component left, or
* tailp==NULL and there's any component left.
*/
if (next != NULL &&
(tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
/* bad path name */
dsl_dir_rele(dd, tag);
dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
err = SET_ERROR(ENOENT);
}
if (tailp != NULL)
*tailp = next;
if (err == 0)
*ddp = dd;
error:
kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
return (err);
}
/*
* If the counts are already initialized for this filesystem and its
* descendants then do nothing, otherwise initialize the counts.
*
* The counts on this filesystem, and those below, may be uninitialized due to
* either the use of a pre-existing pool which did not support the
* filesystem/snapshot limit feature, or one in which the feature had not yet
* been enabled.
*
* Recursively descend the filesystem tree and update the filesystem/snapshot
* counts on each filesystem below, then update the cumulative count on the
* current filesystem. If the filesystem already has a count set on it,
* then we know that its counts, and the counts on the filesystems below it,
* are already correct, so we don't have to update this filesystem.
*/
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
uint64_t my_fs_cnt = 0;
uint64_t my_ss_cnt = 0;
dsl_pool_t *dp = dd->dd_pool;
objset_t *os = dp->dp_meta_objset;
zap_cursor_t *zc;
zap_attribute_t *za;
dsl_dataset_t *ds;
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
ASSERT(dsl_pool_config_held(dp));
ASSERT(dmu_tx_is_syncing(tx));
dsl_dir_zapify(dd, tx);
/*
* If the filesystem count has already been initialized then we
* don't need to recurse down any further.
*/
if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
return;
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* Iterate my child dirs */
for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
dsl_dir_t *chld_dd;
uint64_t count;
VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
&chld_dd));
/*
* Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
*/
if (chld_dd->dd_myname[0] == '$') {
dsl_dir_rele(chld_dd, FTAG);
continue;
}
my_fs_cnt++; /* count this child */
dsl_dir_init_fs_ss_count(chld_dd, tx);
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
my_fs_cnt += count;
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
my_ss_cnt += count;
dsl_dir_rele(chld_dd, FTAG);
}
zap_cursor_fini(zc);
/* Count my snapshots (we counted children's snapshots above) */
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
/* Don't count temporary snapshots */
if (za->za_name[0] != '%')
my_ss_cnt++;
}
zap_cursor_fini(zc);
dsl_dataset_rele(ds, FTAG);
kmem_free(zc, sizeof (zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
/* we're in a sync task, update counts */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}
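/*
 * Check/sync pair used when a filesystem or snapshot limit is first set:
 * the check verifies the feature is enabled and the counts are not
 * already initialized here, and the sync activates the feature (if
 * necessary) and seeds the counts below the target filesystem.
 */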
static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
int error;
error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
if (error != 0)
return (error);
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dd = ds->ds_dir;
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
dsl_dir_is_zapified(dd) &&
zap_contains(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EALREADY));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
spa_t *spa;
VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
spa = dsl_dataset_get_spa(ds);
if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Since the feature was not active and we're now setting a
* limit, increment the feature-active counter so that the
* feature becomes active for the first time.
*
* We are already in a sync task so we can update the MOS.
*/
spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
}
/*
* Since we are now setting a non-UINT64_MAX limit on the filesystem,
* we need to ensure the counts are correct. Descend down the tree from
* this point and update all of the counts to be accurate.
*/
dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* Make sure the feature is enabled and activate it if necessary.
* Since we're setting a limit, ensure the on-disk counts are valid.
* This is only called by the ioctl path when setting a limit value.
*
* We do not need to validate the new limit, since users who can change the
* limit are also allowed to exceed the limit.
*/
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
int error;
error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
ZFS_SPACE_CHECK_RESERVED);
if (error == EALREADY)
error = 0;
return (error);
}
/*
* Used to determine if the filesystem_limit or snapshot_limit should be
* enforced. We allow the limit to be exceeded if the user has permission to
* write the property value. We pass in the creds that we got in the open
* context since we will always be the GZ root in syncing context. We also have
* to handle the case where we are allowed to change the limit on the current
* dataset, but there may be another limit in the tree above.
*
* We can never modify these two properties within a non-global zone. In
* addition, the other checks are modeled on zfs_secpolicy_write_perms. We
* can't use that function since we are already holding the dp_config_rwlock.
* In addition, we already have the dd and dealing with snapshots is simplified
* in this code.
*/
typedef enum {
ENFORCE_ALWAYS,
ENFORCE_NEVER,
ENFORCE_ABOVE
} enforce_res_t;
static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop,
cred_t *cr, proc_t *proc)
{
enforce_res_t enforce = ENFORCE_ALWAYS;
uint64_t obj;
dsl_dataset_t *ds;
uint64_t zoned;
const char *zonedstr;
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
#ifdef _KERNEL
if (crgetzoneid(cr) != GLOBAL_ZONEID)
return (ENFORCE_ALWAYS);
/*
* We are checking the saved credentials of the user process, which is
* not the current process. Note that we can't use secpolicy_zfs(),
* because it only works if the cred is that of the current process (on
* Linux).
*/
if (secpolicy_zfs_proc(cr, proc) == 0)
return (ENFORCE_NEVER);
+#else
+ (void) proc;
#endif
if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
return (ENFORCE_ALWAYS);
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
return (ENFORCE_ALWAYS);
zonedstr = zfs_prop_to_name(ZFS_PROP_ZONED);
if (dsl_prop_get_ds(ds, zonedstr, 8, 1, &zoned, NULL) || zoned) {
/* Only root can access zoned fs's from the GZ */
enforce = ENFORCE_ALWAYS;
} else {
if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
enforce = ENFORCE_ABOVE;
}
dsl_dataset_rele(ds, FTAG);
return (enforce);
}
/*
* Check if adding additional child filesystem(s) would exceed any filesystem
* limits or adding additional snapshot(s) would exceed any snapshot limits.
* The prop argument indicates which limit to check.
*
* Note that all filesystem limits up to the root (or the highest
* initialized) filesystem or the given ancestor must be satisfied.
*/
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
dsl_dir_t *ancestor, cred_t *cr, proc_t *proc)
{
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t limit, count;
char *count_prop;
enforce_res_t enforce;
int err = 0;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
/*
* If we're allowed to change the limit, don't enforce the limit;
* e.g. this can happen if a snapshot is taken by an administrative
* user in the global zone (i.e. a recursive snapshot by root).
* However, we must handle the case of delegated permissions where we
* are allowed to change the limit on the current dataset, but there
* is another limit in the tree above.
*/
enforce = dsl_enforce_ds_ss_limits(dd, prop, cr, proc);
if (enforce == ENFORCE_NEVER)
return (0);
/*
* e.g. if renaming a dataset with no snapshots, count adjustment
* is 0.
*/
if (delta == 0)
return (0);
if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
/*
* We don't enforce the limit for temporary snapshots. This is
* indicated by a NULL cred_t argument.
*/
if (cr == NULL)
return (0);
count_prop = DD_FIELD_SNAPSHOT_COUNT;
} else {
count_prop = DD_FIELD_FILESYSTEM_COUNT;
}
/*
* If an ancestor has been provided, stop checking the limit once we
* hit that dir. We need this during rename so that we don't overcount
* the check once we recurse up to the common ancestor.
*/
if (ancestor == dd)
return (0);
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know there is no limit here (or above). The counts are
* not valid on this node and we know we won't touch this node's counts.
*/
if (!dsl_dir_is_zapified(dd))
return (0);
err = zap_lookup(os, dd->dd_object,
count_prop, sizeof (count), 1, &count);
if (err == ENOENT)
return (0);
if (err != 0)
return (err);
err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
B_FALSE);
if (err != 0)
return (err);
/* Is there a limit which we've hit? */
if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
return (SET_ERROR(EDQUOT));
if (dd->dd_parent != NULL)
err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
ancestor, cr, proc);
return (err);
}
/*
* Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
* parents. When a new filesystem/snapshot is created, increment the count on
* all parents, and when a filesystem/snapshot is destroyed, decrement the
* count.
*/
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
dmu_tx_t *tx)
{
int err;
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t count;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
/*
* We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
*/
if (dd->dd_myname[0] == '$' && strcmp(prop,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
return;
}
/*
* e.g. if renaming a dataset with no snapshots, count adjustment is 0
*/
if (delta == 0)
return;
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know the counts are not valid on this node and we
* know we shouldn't touch this node's counts. An uninitialized count
* on the node indicates that either the feature has not yet been
* activated or there are no limits on this part of the tree.
*/
if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
prop, sizeof (count), 1, &count)) == ENOENT)
return;
VERIFY0(err);
count += delta;
/* Use a signed verify to make sure we're not neg. */
VERIFY3S(count, >=, 0);
VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
tx));
/* Roll up this additional count into our ancestors */
if (dd->dd_parent != NULL)
dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}
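/*
 * Allocate and initialize the on-disk dsl_dir for a new filesystem:
 * link it into the parent's child-dir ZAP (or the pool directory object
 * for the root), create its props and child-dir ZAPs, and bump the
 * parent's filesystem count.
 */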
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t ddobj;
dsl_dir_phys_t *ddphys;
dmu_buf_t *dbuf;
ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
if (pds) {
VERIFY0(zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
name, sizeof (uint64_t), 1, &ddobj, tx));
} else {
/* it's the root dir */
VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
}
VERIFY0(dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
ddphys = dbuf->db_data;
ddphys->dd_creation_time = gethrestime_sec();
if (pds) {
ddphys->dd_parent_obj = pds->dd_object;
/* update the filesystem counts */
dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
}
ddphys->dd_props_zapobj = zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
ddphys->dd_child_dir_zapobj = zap_create(mos,
DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
dmu_buf_rele(dbuf, FTAG);
return (ddobj);
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_origin_obj &&
(dd->dd_pool->dp_origin_snap == NULL ||
dsl_dir_phys(dd)->dd_origin_obj !=
dd->dd_pool->dp_origin_snap->ds_object));
}
uint64_t
dsl_dir_get_used(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_bytes);
}
uint64_t
dsl_dir_get_compressed(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_compressed_bytes);
}
uint64_t
dsl_dir_get_quota(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_quota);
}
uint64_t
dsl_dir_get_reservation(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_reserved);
}
uint64_t
dsl_dir_get_compressratio(dsl_dir_t *dd)
{
/* a fixed point number, 100x the ratio */
return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
(dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
dsl_dir_phys(dd)->dd_compressed_bytes));
}
uint64_t
dsl_dir_get_logicalused(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
}
uint64_t
dsl_dir_get_usedsnap(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
}
uint64_t
dsl_dir_get_usedds(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
}
uint64_t
dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
}
uint64_t
dsl_dir_get_usedchild(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
}
void
dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
{
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (*count), 1, count));
} else {
return (SET_ERROR(ENOENT));
}
}
int
dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (*count), 1, count));
} else {
return (SET_ERROR(ENOENT));
}
}
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
mutex_enter(&dd->dd_lock);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
dsl_dir_get_quota(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
dsl_dir_get_reservation(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
dsl_dir_get_logicalused(dd));
if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
dsl_dir_get_usedsnap(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
dsl_dir_get_usedds(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
dsl_dir_get_usedrefreserv(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
dsl_dir_get_usedchild(dd));
}
mutex_exit(&dd->dd_lock);
uint64_t count;
if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
count);
}
if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
count);
}
if (dsl_dir_is_clone(dd)) {
char buf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dir_get_origin(dd, buf);
dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
}
}
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(dsl_dir_phys(dd));
if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(dd->dd_dbuf, dd);
}
}
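/*
 * Space charged to the parent changes only once usage exceeds this
 * dir's reservation; deltas that stay below dd_reserved are absorbed
 * by the reservation and contribute nothing upward.
 */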
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
uint64_t new_accounted =
MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
return (new_accounted - old_accounted);
}
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
mutex_enter(&dd->dd_lock);
ASSERT0(dd->dd_tempreserved[tx->tx_txg & TXG_MASK]);
dprintf_dd(dd, "txg=%llu towrite=%lluK\n", (u_longlong_t)tx->tx_txg,
(u_longlong_t)dd->dd_space_towrite[tx->tx_txg & TXG_MASK] / 1024);
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] = 0;
mutex_exit(&dd->dd_lock);
/* release the hold from dsl_dir_dirty */
dmu_buf_rele(dd->dd_dbuf, dd);
}
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
uint64_t space = 0;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (int i = 0; i < TXG_SIZE; i++) {
space += dd->dd_space_towrite[i & TXG_MASK];
ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0);
}
return (space);
}
/*
* How much space would dd have available if ancestor had delta applied
* to it? If ondiskonly is set, we're only interested in what's
* on-disk, not estimated pending changes.
*/
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
uint64_t parentspace, myspace, quota, used;
/*
* If there are no restrictions otherwise, assume we have
* unlimited space available.
*/
quota = UINT64_MAX;
parentspace = UINT64_MAX;
if (dd->dd_parent != NULL) {
parentspace = dsl_dir_space_available(dd->dd_parent,
ancestor, delta, ondiskonly);
}
mutex_enter(&dd->dd_lock);
if (dsl_dir_phys(dd)->dd_quota != 0)
quota = dsl_dir_phys(dd)->dd_quota;
used = dsl_dir_phys(dd)->dd_used_bytes;
if (!ondiskonly)
used += dsl_dir_space_towrite(dd);
if (dd->dd_parent == NULL) {
uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool,
ZFS_SPACE_CHECK_NORMAL);
quota = MIN(quota, poolsize);
}
if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
/*
* We have some space reserved, in addition to what our
* parent gave us.
*/
parentspace += dsl_dir_phys(dd)->dd_reserved - used;
}
if (dd == ancestor) {
ASSERT(delta <= 0);
ASSERT(used >= -delta);
used += delta;
if (parentspace != UINT64_MAX)
parentspace -= delta;
}
if (used > quota) {
/* over quota */
myspace = 0;
} else {
/*
* the lesser of the space provided by our parent and
* the space left in our quota
*/
myspace = MIN(parentspace, quota - used);
}
mutex_exit(&dd->dd_lock);
return (myspace);
}
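A worked example of the leaf case above (illustrative numbers, not from this change): with dd_quota = 20 GiB, dd_used_bytes plus pending writes = 15 GiB, dd_reserved = 18 GiB, and parentspace = 50 GiB, the 3 GiB of reservation not yet consumed is added to parentspace (53 GiB), and the result is MIN(53 GiB, 20 GiB - 15 GiB) = 5 GiB. Had used exceeded the quota, the function would report 0 instead.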
struct tempreserve {
list_node_t tr_node;
dsl_dir_t *tr_ds;
uint64_t tr_size;
};
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
boolean_t ignorequota, list_t *tr_list,
dmu_tx_t *tx, boolean_t first)
{
uint64_t txg;
uint64_t quota;
struct tempreserve *tr;
int retval;
uint64_t ref_rsrv;
top_of_function:
txg = tx->tx_txg;
retval = EDQUOT;
ref_rsrv = 0;
ASSERT3U(txg, !=, 0);
ASSERT3S(asize, >, 0);
mutex_enter(&dd->dd_lock);
/*
* Check against the dsl_dir's quota. We don't add in the delta
* when checking for over-quota because they get one free hit.
*/
uint64_t est_inflight = dsl_dir_space_towrite(dd);
for (int i = 0; i < TXG_SIZE; i++)
est_inflight += dd->dd_tempreserved[i];
uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
/*
* On the first iteration, fetch the dataset's used-on-disk and
* refreservation values. Also, if checkrefquota is set, test if
* allocating this space would exceed the dataset's refquota.
*/
if (first && tx->tx_objset) {
int error;
dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
error = dsl_dataset_check_quota(ds, !netfree,
asize, est_inflight, &used_on_disk, &ref_rsrv);
if (error != 0) {
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (error);
}
}
/*
* If this transaction will result in a net free of space,
* we want to let it through.
*/
if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
quota = UINT64_MAX;
else
quota = dsl_dir_phys(dd)->dd_quota;
/*
* Adjust the quota against the actual pool size at the root
* minus any outstanding deferred frees.
* To ensure that it's possible to remove files from a full
* pool without inducing transient overcommits, we throttle
* netfree transactions against a quota that is slightly larger,
* but still within the pool's allocation slop. In cases where
* we're very close to full, this will allow a steady trickle of
* removes to get through.
*/
- uint64_t deferred = 0;
if (dd->dd_parent == NULL) {
uint64_t avail = dsl_pool_unreserved_space(dd->dd_pool,
(netfree) ?
ZFS_SPACE_CHECK_RESERVED : ZFS_SPACE_CHECK_NORMAL);
if (avail < quota) {
quota = avail;
retval = SET_ERROR(ENOSPC);
}
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
- * on-disk is over quota and there are no pending changes (which
- * may free up space for us).
+ * on-disk is over quota and there are no pending changes
+ * or deferred frees (which may free up space for us).
*/
if (used_on_disk + est_inflight >= quota) {
- if (est_inflight > 0 || used_on_disk < quota ||
- (retval == ENOSPC && used_on_disk < quota + deferred))
- retval = ERESTART;
+ if (est_inflight > 0 || used_on_disk < quota) {
+ retval = SET_ERROR(ERESTART);
+ } else {
+ ASSERT3U(used_on_disk, >=, quota);
+
+ if (retval == ENOSPC && (used_on_disk - quota) <
+ dsl_pool_deferred_space(dd->dd_pool)) {
+ retval = SET_ERROR(ERESTART);
+ }
+ }
+
dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
"quota=%lluK tr=%lluK err=%d\n",
(u_longlong_t)used_on_disk>>10,
(u_longlong_t)est_inflight>>10,
(u_longlong_t)quota>>10, (u_longlong_t)asize>>10, retval);
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
- return (SET_ERROR(retval));
+ return (retval);
}
/* We need to up our estimated delta before dropping dd_lock */
dd->dd_tempreserved[txg & TXG_MASK] += asize;
uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
asize - ref_rsrv);
mutex_exit(&dd->dd_lock);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_ds = dd;
tr->tr_size = asize;
list_insert_tail(tr_list, tr);
/* see if it's OK with our parent */
if (dd->dd_parent != NULL && parent_rsrv != 0) {
/*
* Recurse on our parent without actual recursion: true recursion here
* has been observed to use a potentially large amount of stack, even
* within the test suite. The largest stack seen was 7632 bytes on Linux.
*/
dd = dd->dd_parent;
asize = parent_rsrv;
ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
first = B_FALSE;
goto top_of_function;
} else {
return (0);
}
}
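The over-quota branch this change rewrites can be summarized as: fail hard with EDQUOT/ENOSPC only when nothing in flight and no deferred frees could bring usage back under quota; otherwise ask the caller to retry. A standalone sketch of that decision (illustrative only, not part of the OpenZFS source; the verdict enum and names are hypothetical):
#include <stdint.h>
#include <stdio.h>
enum verdict { V_OK, V_EDQUOT, V_ENOSPC, V_ERESTART };
static enum verdict
quota_verdict(uint64_t used_on_disk, uint64_t est_inflight, uint64_t quota,
    uint64_t deferred_free, int quota_capped_by_pool)
{
	/* EDQUOT for a dataset quota, ENOSPC when the pool size capped it. */
	enum verdict v = quota_capped_by_pool ? V_ENOSPC : V_EDQUOT;
	if (used_on_disk + est_inflight < quota)
		return (V_OK);		/* the reservation fits under the quota */
	if (est_inflight > 0 || used_on_disk < quota)
		return (V_ERESTART);	/* pending changes may free space */
	/* Hard over quota; deferred frees may still bring us back under. */
	if (v == V_ENOSPC && used_on_disk - quota < deferred_free)
		return (V_ERESTART);
	return (v);
}
int
main(void)
{
	uint64_t gib = 1ULL << 30;
	/* Over a pool-capped quota, but deferred frees cover the 1 GiB overage. */
	printf("%d\n", quota_verdict(21 * gib, 0, 20 * gib, 2 * gib, 1));
	/* Over a plain dataset quota with nothing pending: hard EDQUOT. */
	printf("%d\n", quota_verdict(21 * gib, 0, 20 * gib, 2 * gib, 0));
	return (0);
}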
/*
* Reserve space in this dsl_dir, to be used in this tx's txg.
* After the space has been dirtied (and dsl_dir_willuse_space()
* has been called), the reservation should be canceled, using
* dsl_dir_tempreserve_clear().
*/
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
{
int err;
list_t *tr_list;
if (asize == 0) {
*tr_cookiep = NULL;
return (0);
}
tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
list_create(tr_list, sizeof (struct tempreserve),
offsetof(struct tempreserve, tr_node));
ASSERT3S(asize, >, 0);
err = arc_tempreserve_space(dd->dd_pool->dp_spa, lsize, tx->tx_txg);
if (err == 0) {
struct tempreserve *tr;
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_size = lsize;
list_insert_tail(tr_list, tr);
} else {
if (err == EAGAIN) {
/*
* If arc_memory_throttle() detected that pageout
* is running and we are low on memory, we delay new
* non-pageout transactions to give pageout an
* advantage.
*
* It is unfortunate to be delaying while the caller's
* locks are held.
*/
txg_delay(dd->dd_pool, tx->tx_txg,
MSEC2NSEC(10), MSEC2NSEC(10));
err = SET_ERROR(ERESTART);
}
}
if (err == 0) {
err = dsl_dir_tempreserve_impl(dd, asize, netfree,
B_FALSE, tr_list, tx, B_TRUE);
}
if (err != 0)
dsl_dir_tempreserve_clear(tr_list, tx);
else
*tr_cookiep = tr_list;
return (err);
}
/*
* Clear a temporary reservation that we previously made with
* dsl_dir_tempreserve_space().
*/
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
int txgidx = tx->tx_txg & TXG_MASK;
list_t *tr_list = tr_cookie;
struct tempreserve *tr;
ASSERT3U(tx->tx_txg, !=, 0);
if (tr_cookie == NULL)
return;
while ((tr = list_head(tr_list)) != NULL) {
if (tr->tr_ds) {
mutex_enter(&tr->tr_ds->dd_lock);
ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
tr->tr_size);
tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
mutex_exit(&tr->tr_ds->dd_lock);
} else {
arc_tempreserve_clear(tr->tr_size);
}
list_remove(tr_list, tr);
kmem_free(tr, sizeof (struct tempreserve));
}
kmem_free(tr_list, sizeof (list_t));
}
/*
* This should be called from open context when we think we're going to write
* or free space, for example when dirtying data. Be conservative; it's okay
* to write less space or free more, but we don't want to write more or free
* less than the amount specified.
*
* NOTE: The behavior of this function is identical to the Illumos / FreeBSD
* version; however, it has been adjusted to use an iterative rather than a
* recursive algorithm to minimize stack usage.
*/
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
int64_t parent_space;
uint64_t est_used;
do {
mutex_enter(&dd->dd_lock);
if (space > 0)
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
est_used = dsl_dir_space_towrite(dd) +
dsl_dir_phys(dd)->dd_used_bytes;
parent_space = parent_delta(dd, est_used, space);
mutex_exit(&dd->dd_lock);
/* Make sure that we clean up dd_space_to* */
dsl_dir_dirty(dd, tx);
dd = dd->dd_parent;
space = parent_space;
} while (space && dd);
}
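As a concrete illustration of the loop above (hypothetical numbers): a child with an 8 GiB reservation that already accounts 7 GiB and is charged 2 GiB more only propagates 1 GiB to its parent (per parent_delta()); if that parent has no reservation, the full 1 GiB continues upward, and the walk ends once the propagated delta reaches zero or the root's NULL parent is reached.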
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
int64_t accounted_delta;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(type < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/*
* dsl_dataset_set_refreservation_sync_impl() calls this with
* dd_lock held, so that it can atomically update
* ds->ds_reserved and the dsl_dir accounting, so that
* dsl_dataset_check_quota() can see dataset and dir accounting
* consistently.
*/
boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
if (needlock)
mutex_enter(&dd->dd_lock);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
ddp->dd_uncompressed_bytes >= -uncompressed);
ddp->dd_used_bytes += used;
ddp->dd_uncompressed_bytes += uncompressed;
ddp->dd_compressed_bytes += compressed;
if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(used >= 0 || ddp->dd_used_breakdown[type] >= -used);
ddp->dd_used_breakdown[type] += used;
#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += ddp->dd_used_breakdown[t];
ASSERT3U(u, ==, ddp->dd_used_bytes);
}
#endif
}
if (needlock)
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_transfer_space(dd->dd_parent,
accounted_delta, compressed, uncompressed,
used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
if (delta == 0 ||
!(ddp->dd_flags & DD_FLAG_USED_BREAKDOWN))
return;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
ASSERT(delta > 0 ?
ddp->dd_used_breakdown[oldtype] >= delta :
ddp->dd_used_breakdown[newtype] >= -delta);
ASSERT(ddp->dd_used_bytes >= ABS(delta));
ddp->dd_used_breakdown[oldtype] -= delta;
ddp->dd_used_breakdown[newtype] += delta;
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_diduse_transfer_space(dsl_dir_t *dd, int64_t used,
int64_t compressed, int64_t uncompressed, int64_t tonew,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
int64_t accounted_delta;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
dsl_dir_phys_t *ddp = dsl_dir_phys(dd);
accounted_delta = parent_delta(dd, ddp->dd_used_bytes, used);
ASSERT(used >= 0 || ddp->dd_used_bytes >= -used);
ASSERT(compressed >= 0 || ddp->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
ddp->dd_uncompressed_bytes >= -uncompressed);
ddp->dd_used_bytes += used;
ddp->dd_uncompressed_bytes += uncompressed;
ddp->dd_compressed_bytes += compressed;
if (ddp->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(tonew - used <= 0 ||
ddp->dd_used_breakdown[oldtype] >= tonew - used);
ASSERT(tonew >= 0 ||
ddp->dd_used_breakdown[newtype] >= -tonew);
ddp->dd_used_breakdown[oldtype] -= tonew - used;
ddp->dd_used_breakdown[newtype] += tonew;
#ifdef ZFS_DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += ddp->dd_used_breakdown[t];
ASSERT3U(u, ==, ddp->dd_used_bytes);
}
#endif
}
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_transfer_space(dd->dd_parent,
accounted_delta, compressed, uncompressed,
used, DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
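The ZFS_DEBUG blocks above assert that the per-type breakdown always sums to dd_used_bytes. A tiny standalone sketch of that bookkeeping (illustrative only; the bucket names are stand-ins for the dd_used_t values):
#include <stdint.h>
#include <assert.h>
enum { SK_HEAD, SK_SNAP, SK_CHILD, SK_CHILD_RSRV, SK_REFRSRV, SK_NUM };
int
main(void)
{
	uint64_t used_bytes = 0, breakdown[SK_NUM] = { 0 };
	int64_t used = 4LL << 30;	/* charge 4 GiB to the head dataset */
	int64_t delta = 1LL << 30;	/* later move 1 GiB of it to snapshots */
	uint64_t sum = 0;
	/* as dsl_dir_diduse_space() would: bump the total and one bucket */
	used_bytes += used;
	breakdown[SK_HEAD] += used;
	/* as dsl_dir_transfer_space() would: move between buckets only */
	breakdown[SK_HEAD] -= delta;
	breakdown[SK_SNAP] += delta;
	for (int t = 0; t < SK_NUM; t++)
		sum += breakdown[t];
	assert(sum == used_bytes);	/* mirrors the ZFS_DEBUG check above */
	return (0);
}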
typedef struct dsl_dir_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t towrite, newval;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
error = dsl_prop_predict(ds->ds_dir, "quota",
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_dir->dd_lock);
/*
* If we are doing the preliminary check in open context, and
* there are pending changes, then don't fail it, since the
* pending changes could under-estimate the amount of space to be
* freed up.
*/
towrite = dsl_dir_space_towrite(ds->ds_dir);
if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
(newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
error = SET_ERROR(ENOSPC);
}
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
return (error);
}
static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
}
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = quota;
return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
dsl_dir_set_quota_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
uint64_t newval, used, avail;
int error;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
dd = ds->ds_dir;
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
mutex_exit(&dd->dd_lock);
if (dd->dd_parent) {
avail = dsl_dir_space_available(dd->dd_parent,
NULL, 0, FALSE);
} else {
avail = dsl_pool_adjustedsize(dd->dd_pool,
ZFS_SPACE_CHECK_NORMAL) - used;
}
if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
uint64_t delta = MAX(used, newval) -
MAX(used, dsl_dir_phys(dd)->dd_reserved);
if (delta > avail ||
(dsl_dir_phys(dd)->dd_quota > 0 &&
newval > dsl_dir_phys(dd)->dd_quota))
error = SET_ERROR(ENOSPC);
}
dsl_dataset_rele(ds, FTAG);
return (error);
}
void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
uint64_t used;
int64_t delta;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
dsl_dir_phys(dd)->dd_reserved = value;
if (dd->dd_parent != NULL) {
/* Roll up this additional usage into our ancestors */
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
delta, 0, 0, tx);
}
mutex_exit(&dd->dd_lock);
}
static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_RESERVATION),
(longlong_t)newval);
}
dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = reservation;
return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
dsl_dir_set_reservation_sync, &ddsqra, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
for (; ds1; ds1 = ds1->dd_parent) {
dsl_dir_t *dd;
for (dd = ds2; dd; dd = dd->dd_parent) {
if (ds1 == dd)
return (dd);
}
}
return (NULL);
}
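closest_common_ancestor() is a plain walk of two parent chains; the same idea on a toy parent-pointer structure (illustrative only, hypothetical names):
#include <stddef.h>
#include <stdio.h>
struct node { const char *name; struct node *parent; };
static struct node *
common_ancestor(struct node *a, struct node *b)
{
	for (; a != NULL; a = a->parent) {
		for (struct node *n = b; n != NULL; n = n->parent) {
			if (a == n)
				return (a);
		}
	}
	return (NULL);
}
int
main(void)
{
	struct node pool = { "pool", NULL };
	struct node home = { "pool/home", &pool };
	struct node alice = { "pool/home/alice", &home };
	struct node vms = { "pool/vms", &pool };
	printf("%s\n", common_ancestor(&alice, &home)->name);	/* pool/home */
	printf("%s\n", common_ancestor(&alice, &vms)->name);	/* pool */
	return (0);
}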
/*
* If delta is applied to dd, how much of that delta would be applied to
* ancestor? Syncing context only.
*/
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
if (dd == ancestor)
return (delta);
mutex_enter(&dd->dd_lock);
delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
mutex_exit(&dd->dd_lock);
return (would_change(dd->dd_parent, delta, ancestor));
}
typedef struct dsl_dir_rename_arg {
const char *ddra_oldname;
const char *ddra_newname;
cred_t *ddra_cred;
proc_t *ddra_proc;
} dsl_dir_rename_arg_t;
typedef struct dsl_valid_rename_arg {
int char_delta;
int nest_delta;
} dsl_valid_rename_arg_t;
-/* ARGSUSED */
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
+ (void) dp;
dsl_valid_rename_arg_t *dvra = arg;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, namebuf);
ASSERT3U(strnlen(namebuf, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
int namelen = strlen(namebuf) + dvra->char_delta;
int depth = get_dataset_depth(namebuf) + dvra->nest_delta;
if (namelen >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
if (dvra->nest_delta > 0 && depth >= zfs_max_dataset_nesting)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
dsl_valid_rename_arg_t dvra;
dsl_dataset_t *parentds;
objset_t *parentos;
const char *mynewname;
int error;
/* target dir should exist */
error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
if (error != 0)
return (error);
/* new parent should exist */
error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
&newparent, &mynewname);
if (error != 0) {
dsl_dir_rele(dd, FTAG);
return (error);
}
/* can't rename to different pool */
if (dd->dd_pool != newparent->dd_pool) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EXDEV));
}
/* new name should not already exist */
if (mynewname == NULL) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EEXIST));
}
/* can't rename below anything but filesystems (e.g. no ZVOLs) */
error = dsl_dataset_hold_obj(newparent->dd_pool,
dsl_dir_phys(newparent)->dd_head_dataset_obj, FTAG, &parentds);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
error = dmu_objset_from_ds(parentds, &parentos);
if (error != 0) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
if (dmu_objset_type(parentos) != DMU_OST_ZFS) {
dsl_dataset_rele(parentds, FTAG);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
}
dsl_dataset_rele(parentds, FTAG);
ASSERT3U(strnlen(ddra->ddra_newname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
ASSERT3U(strnlen(ddra->ddra_oldname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
dvra.char_delta = strlen(ddra->ddra_newname)
- strlen(ddra->ddra_oldname);
dvra.nest_delta = get_dataset_depth(ddra->ddra_newname)
- get_dataset_depth(ddra->ddra_oldname);
/* if the name length is growing, validate child name lengths */
if (dvra.char_delta > 0 || dvra.nest_delta > 0) {
error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
&dvra, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
if (dmu_tx_is_syncing(tx)) {
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Although this is the check function and we don't
* normally make on-disk changes in check functions,
* we need to do that here.
*
* Ensure this portion of the tree's counts have been
* initialized in case the new parent has limits set.
*/
dsl_dir_init_fs_ss_count(dd, tx);
}
}
if (newparent != dd->dd_parent) {
/* is there enough space? */
uint64_t myspace =
MAX(dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_reserved);
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
if (dsl_dir_is_zapified(dd)) {
int err;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
/*
* have to add 1 for the filesystem itself that we're
* moving
*/
fs_cnt++;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
}
/* check for encryption errors */
error = dsl_dir_rename_crypt_check(dd, newparent);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EACCES));
}
/* no rename into our descendant */
if (closest_common_ancestor(dd, newparent) == dd) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_dir_transfer_possible(dd->dd_parent,
newparent, fs_cnt, ss_cnt, myspace,
ddra->ddra_cred, ddra->ddra_proc);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (0);
}
static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
const char *mynewname;
objset_t *mos = dp->dp_meta_objset;
VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
&mynewname));
/* Log this before we change the name. */
spa_history_log_internal_dd(dd, "rename", tx,
"-> %s", ddra->ddra_newname);
if (newparent != dd->dd_parent) {
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
/*
* We already made sure the dd counts were initialized in the
* check function.
*/
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt));
/* add 1 for the filesystem itself that we're moving */
fs_cnt++;
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt));
}
dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
-dsl_dir_phys(dd)->dd_used_bytes,
-dsl_dir_phys(dd)->dd_compressed_bytes,
-dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD,
dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_compressed_bytes,
dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
if (dsl_dir_phys(dd)->dd_reserved >
dsl_dir_phys(dd)->dd_used_bytes) {
uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
dsl_dir_phys(dd)->dd_used_bytes;
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
-unused_rsrv, 0, 0, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
unused_rsrv, 0, 0, tx);
}
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/* remove from old parent zapobj */
VERIFY0(zap_remove(mos,
dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
dd->dd_myname, tx));
(void) strlcpy(dd->dd_myname, mynewname,
sizeof (dd->dd_myname));
dsl_dir_rele(dd->dd_parent, dd);
dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
VERIFY0(dsl_dir_hold_obj(dp,
newparent->dd_object, NULL, dd, &dd->dd_parent));
/* add to new parent zapobj */
VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
dd->dd_myname, 8, 1, &dd->dd_object, tx));
/* TODO: A rename callback to avoid these layering violations. */
zfsvfs_update_fromname(ddra->ddra_oldname, ddra->ddra_newname);
zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
ddra->ddra_newname, B_TRUE);
dsl_prop_notify_all(dd);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
}
int
dsl_dir_rename(const char *oldname, const char *newname)
{
dsl_dir_rename_arg_t ddra;
ddra.ddra_oldname = oldname;
ddra.ddra_newname = newname;
ddra.ddra_cred = CRED();
ddra.ddra_proc = curproc;
return (dsl_sync_task(oldname,
dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
3, ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space,
cred_t *cr, proc_t *proc)
{
dsl_dir_t *ancestor;
int64_t adelta;
uint64_t avail;
int err;
ancestor = closest_common_ancestor(sdd, tdd);
adelta = would_change(sdd, -space, ancestor);
avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
if (avail < space)
return (SET_ERROR(ENOSPC));
err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
ancestor, cr, proc);
if (err != 0)
return (err);
err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
ancestor, cr, proc);
if (err != 0)
return (err);
return (0);
}
inode_timespec_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
inode_timespec_t t;
mutex_enter(&dd->dd_lock);
t = dd->dd_snap_cmtime;
mutex_exit(&dd->dd_lock);
return (t);
}
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
inode_timespec_t t;
gethrestime(&t);
mutex_enter(&dd->dd_lock);
dd->dd_snap_cmtime = t;
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}
boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
dmu_object_info_t doi;
dmu_object_info_from_db(dd->dd_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
void
dsl_dir_livelist_open(dsl_dir_t *dd, uint64_t obj)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
ASSERT(spa_feature_is_active(dd->dd_pool->dp_spa,
SPA_FEATURE_LIVELIST));
dsl_deadlist_open(&dd->dd_livelist, mos, obj);
bplist_create(&dd->dd_pending_allocs);
bplist_create(&dd->dd_pending_frees);
}
void
dsl_dir_livelist_close(dsl_dir_t *dd)
{
dsl_deadlist_close(&dd->dd_livelist);
bplist_destroy(&dd->dd_pending_allocs);
bplist_destroy(&dd->dd_pending_frees);
}
void
dsl_dir_remove_livelist(dsl_dir_t *dd, dmu_tx_t *tx, boolean_t total)
{
uint64_t obj;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
livelist_condense_entry_t to_condense = spa->spa_to_condense;
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return;
/*
* If the livelist being removed is set to be condensed, stop the
* condense zthr and indicate the cancellation in the spa_to_condense
* struct in case the condense no-wait synctask has already started
*/
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL &&
(to_condense.ds != NULL) && (to_condense.ds->ds_dir == dd)) {
/*
* We use zthr_wait_cycle_done instead of zthr_cancel
* because we don't want to destroy the zthr, just have
* it skip its current task.
*/
spa->spa_to_condense.cancelled = B_TRUE;
zthr_wait_cycle_done(ll_condense_thread);
/*
* If we've returned from zthr_wait_cycle_done without
* clearing the to_condense data structure, it's either
* because the no-wait synctask has started (indicated by
* the 'syncing' field of to_condense), in which case we
* can expect it to clear to_condense on its own, or
* because we returned before the zthr ran. In the latter
* case the checkfunc will now fail as cancelled == B_TRUE,
* so we can safely NULL out ds, allowing a different dir's
* livelist to be condensed.
*
* We can be sure that the to_condense struct will not
* be repopulated at this stage because both this
* function and dsl_livelist_try_condense execute in
* syncing context.
*/
if ((spa->spa_to_condense.ds != NULL) &&
!spa->spa_to_condense.syncing) {
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf,
spa);
spa->spa_to_condense.ds = NULL;
}
}
dsl_dir_livelist_close(dd);
VERIFY0(zap_lookup(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &obj));
VERIFY0(zap_remove(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_LIVELIST, tx));
if (total) {
dsl_deadlist_free(dp->dp_meta_objset, obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
}
}
static int
dsl_dir_activity_in_progress(dsl_dir_t *dd, dsl_dataset_t *ds,
zfs_wait_activity_t activity, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&dd->dd_activity_lock));
switch (activity) {
case ZFS_WAIT_DELETEQ: {
#ifdef _KERNEL
objset_t *os;
error = dmu_objset_from_ds(ds, &os);
if (error != 0)
break;
mutex_enter(&os->os_user_ptr_lock);
void *user = dmu_objset_get_user(os);
mutex_exit(&os->os_user_ptr_lock);
if (dmu_objset_type(os) != DMU_OST_ZFS ||
user == NULL || zfs_get_vfs_flag_unmounted(os)) {
*in_progress = B_FALSE;
return (0);
}
uint64_t readonly = B_FALSE;
error = zfs_get_temporary_prop(ds, ZFS_PROP_READONLY, &readonly,
NULL);
if (error != 0)
break;
if (readonly || !spa_writeable(dd->dd_pool->dp_spa)) {
*in_progress = B_FALSE;
return (0);
}
uint64_t count, unlinked_obj;
error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
&unlinked_obj);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
break;
}
error = zap_count(os, unlinked_obj, &count);
if (error == 0)
*in_progress = (count != 0);
break;
#else
/*
* The delete queue is ZPL specific, and libzpool doesn't have
* it. It doesn't make sense to wait for it.
*/
+ (void) ds;
*in_progress = B_FALSE;
break;
#endif
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
int
dsl_dir_wait(dsl_dir_t *dd, dsl_dataset_t *ds, zfs_wait_activity_t activity,
boolean_t *waited)
{
int error = 0;
boolean_t in_progress;
dsl_pool_t *dp = dd->dd_pool;
for (;;) {
dsl_pool_config_enter(dp, FTAG);
error = dsl_dir_activity_in_progress(dd, ds, activity,
&in_progress);
dsl_pool_config_exit(dp, FTAG);
if (error != 0 || !in_progress)
break;
*waited = B_TRUE;
if (cv_wait_sig(&dd->dd_activity_cv, &dd->dd_activity_lock) ==
0 || dd->dd_activity_cancelled) {
error = SET_ERROR(EINTR);
break;
}
}
return (error);
}
void
dsl_dir_cancel_waiters(dsl_dir_t *dd)
{
mutex_enter(&dd->dd_activity_lock);
dd->dd_activity_cancelled = B_TRUE;
cv_broadcast(&dd->dd_activity_cv);
while (dd->dd_activity_waiters > 0)
cv_wait(&dd->dd_activity_cv, &dd->dd_activity_lock);
mutex_exit(&dd->dd_activity_lock);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dsl_pool.c b/sys/contrib/openzfs/module/zfs/dsl_pool.c
index e66c136a9e02..af7fa5af4e1e 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_pool.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_pool.c
@@ -1,1416 +1,1422 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_zfs.h>
#include <sys/mmp.h>
/*
* ZFS Write Throttle
* ------------------
*
* ZFS must limit the rate of incoming writes to the rate at which it is able
* to sync data modifications to the backend storage. Throttling by too much
* creates an artificial limit; throttling by too little can only be sustained
* for short periods and would lead to highly lumpy performance. On a per-pool
* basis, ZFS tracks the amount of modified (dirty) data. As operations change
* data, the amount of dirty data increases; as ZFS syncs out data, the amount
* of dirty data decreases. When the amount of dirty data exceeds a
* predetermined threshold further modifications are blocked until the amount
* of dirty data decreases (as data is synced out).
*
* The limit on dirty data is tunable, and should be adjusted according to
* both the IO capacity and available memory of the system. The larger the
* window, the more ZFS is able to aggregate and amortize metadata (and data)
* changes. However, memory is a limited resource, and allowing for more dirty
* data comes at the cost of keeping other useful data in memory (for example
* ZFS data cached by the ARC).
*
* Implementation
*
* As buffers are modified, dsl_pool_willuse_space() increments both the per-
* txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
* dirty space used; dsl_pool_dirty_space() decrements those values as data
* is synced out from dsl_pool_sync(). While only the poolwide value is
* relevant, the per-txg value is useful for debugging. The tunable
* zfs_dirty_data_max determines the dirty space limit. Once that value is
* exceeded, new writes are halted until space frees up.
*
* The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
* ensure that there is a txg syncing (see the comment in txg.c for a full
* description of transaction group stages).
*
* The IO scheduler uses both the dirty space limit and current amount of
* dirty data as inputs. Those values affect the number of concurrent IOs ZFS
* issues. See the comment in vdev_queue.c for details of the IO scheduler.
*
* The delay is also calculated based on the amount of dirty data. See the
* comment above dmu_tx_delay() for details.
*/
/*
* zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
* capped at zfs_dirty_data_max_max. It can also be overridden with a module
* parameter.
*/
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
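As a worked example of these defaults (assuming they are unchanged): on a system with 64 GiB of memory, zfs_dirty_data_max works out to 10% = 6.4 GiB of dirty data, while zfs_dirty_data_max_max caps any override at 25% = 16 GiB.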
/*
* If there's at least this much dirty data (as a percentage of
* zfs_dirty_data_max), push out a txg. This should be less than
* zfs_vdev_async_write_active_min_dirty_percent.
*/
int zfs_dirty_data_sync_percent = 20;
/*
* Once there is this amount of dirty data, the dmu_tx_delay() will kick in
* and delay each transaction.
* This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
*/
int zfs_delay_min_dirty_percent = 60;
/*
* This controls how quickly the delay approaches infinity.
* Larger values cause it to delay more for a given amount of dirty data.
* Therefore larger values will cause there to be less dirty data for a
* given throughput.
*
* For the smoothest delay, this value should be about 1 billion divided
* by the maximum number of operations per second. This will smoothly
* handle between 10x and 1/10th this number.
*
* Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
* multiply in dmu_tx_delay().
*/
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
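Worked example of the guidance above: the default of 1000 * 1000 * 1000 / 2000 = 500,000 corresponds to a backend that sustains roughly 2,000 operations per second and, per the comment, should behave smoothly between about 200 and 20,000 ops/s.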
/*
* This determines the number of threads used by the dp_sync_taskq.
*/
int zfs_sync_taskq_batch_pct = 75;
/*
* These tunables determine the behavior of how zil_itxg_clean() is
* called via zil_clean() in the context of spa_sync(). When an itxg
* list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
* If the dispatch fails, the call to zil_itxg_clean() will occur
* synchronously in the context of spa_sync(), which can negatively
* impact the performance of spa_sync() (e.g. in the case of the itxg
* list having a large number of itxs that needs to be cleaned).
*
* Thus, these tunables can be used to manipulate the behavior of the
* taskq used by zil_clean(); they determine the number of taskq entries
* that are pre-populated when the taskq is first created (via the
* "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
* taskq entries that are cached after an on-demand allocation (via the
* "zfs_zil_clean_taskq_maxalloc").
*
* The idea being, we want to try reasonably hard to ensure there will
* already be a taskq entry pre-allocated by the time that it is needed
* by zil_clean(). This way, we can avoid the possibility of an
* on-demand allocation of a new taskq entry from failing, which would
* result in zil_itxg_clean() being called synchronously from zil_clean()
* (which can adversely affect performance of spa_sync()).
*
* Additionally, the number of threads used by the taskq can be
* configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
*/
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
uint64_t obj;
int err;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
name, sizeof (obj), 1, &obj);
if (err)
return (err);
return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
dsl_pool_t *dp;
blkptr_t *bp = spa_get_rootblkptr(spa);
dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
dp->dp_spa = spa;
dp->dp_meta_rootbp = *bp;
rrw_init(&dp->dp_config_rwlock, B_TRUE);
txg_init(dp, txg);
mmp_init(spa);
txg_list_create(&dp->dp_dirty_datasets, spa,
offsetof(dsl_dataset_t, ds_dirty_link));
txg_list_create(&dp->dp_dirty_zilogs, spa,
offsetof(zilog_t, zl_dirty_link));
txg_list_create(&dp->dp_dirty_dirs, spa,
offsetof(dsl_dir_t, dd_dirty_link));
txg_list_create(&dp->dp_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
txg_list_create(&dp->dp_early_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
TASKQ_THREADS_CPU_PCT);
dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
zfs_zil_clean_taskq_nthr_pct, minclsyspri,
zfs_zil_clean_taskq_minalloc,
zfs_zil_clean_taskq_maxalloc,
TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
dp->dp_zrele_taskq = taskq_create("z_zrele", 100, defclsyspri,
boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
TASKQ_THREADS_CPU_PCT);
dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
100, defclsyspri, boot_ncpus, INT_MAX,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
return (dp);
}
int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
/*
* Initialize the caller's dsl_pool_t structure before we actually open
* the meta objset. This is done because a self-healing write zio may
* be issued as part of dmu_objset_open_impl() and the spa needs its
* dsl_pool_t initialized in order to handle the write.
*/
*dpp = dp;
err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
&dp->dp_meta_objset);
if (err != 0) {
dsl_pool_close(dp);
*dpp = NULL;
}
return (err);
}
int
dsl_pool_open(dsl_pool_t *dp)
{
int err;
dsl_dir_t *dd;
dsl_dataset_t *ds;
uint64_t obj;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
&dp->dp_root_dir_obj);
if (err)
goto out;
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir);
if (err)
goto out;
err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
if (err)
goto out;
if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
if (err)
goto out;
err = dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
if (err == 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
&dp->dp_origin_snap);
dsl_dataset_rele(ds, FTAG);
}
dsl_dir_rele(dd, dp);
if (err)
goto out;
}
if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
&dp->dp_free_dir);
if (err)
goto out;
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
if (err)
goto out;
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
if (err == 0) {
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
dp->dp_meta_objset, obj));
} else if (err == ENOENT) {
/*
* We might not have created the remap bpobj yet.
*/
err = 0;
} else {
goto out;
}
}
/*
* Note: errors ignored, because these special dirs, used for
* space accounting, are only created on demand.
*/
(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
&dp->dp_leak_dir);
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
&dp->dp_bptree_obj);
if (err != 0)
goto out;
}
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj);
if (err != 0)
goto out;
}
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
&dp->dp_tmp_userrefs_obj);
if (err == ENOENT)
err = 0;
if (err)
goto out;
err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
out:
rrw_exit(&dp->dp_config_rwlock, FTAG);
return (err);
}
void
dsl_pool_close(dsl_pool_t *dp)
{
/*
* Drop our references from dsl_pool_open().
*
* Since we held the origin_snap from "syncing" context (which
* includes pool-opening context), it actually only got a "ref"
* and not a hold, so just drop that here.
*/
if (dp->dp_origin_snap != NULL)
dsl_dataset_rele(dp->dp_origin_snap, dp);
if (dp->dp_mos_dir != NULL)
dsl_dir_rele(dp->dp_mos_dir, dp);
if (dp->dp_free_dir != NULL)
dsl_dir_rele(dp->dp_free_dir, dp);
if (dp->dp_leak_dir != NULL)
dsl_dir_rele(dp->dp_leak_dir, dp);
if (dp->dp_root_dir != NULL)
dsl_dir_rele(dp->dp_root_dir, dp);
bpobj_close(&dp->dp_free_bpobj);
bpobj_close(&dp->dp_obsolete_bpobj);
/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
if (dp->dp_meta_objset != NULL)
dmu_objset_evict(dp->dp_meta_objset);
txg_list_destroy(&dp->dp_dirty_datasets);
txg_list_destroy(&dp->dp_dirty_zilogs);
txg_list_destroy(&dp->dp_sync_tasks);
txg_list_destroy(&dp->dp_early_sync_tasks);
txg_list_destroy(&dp->dp_dirty_dirs);
taskq_destroy(dp->dp_zil_clean_taskq);
taskq_destroy(dp->dp_sync_taskq);
/*
* We can't set retry to TRUE since we're explicitly specifying
* a spa to flush. This is good enough; any missed buffers for
* this spa won't cause trouble, and they'll eventually fall
* out of the ARC just like any other unused buffer.
*/
arc_flush(dp->dp_spa, FALSE);
mmp_fini(dp->dp_spa);
txg_fini(dp);
dsl_scan_fini(dp);
dmu_buf_user_evict_wait();
rrw_destroy(&dp->dp_config_rwlock);
mutex_destroy(&dp->dp_lock);
cv_destroy(&dp->dp_spaceavail_cv);
taskq_destroy(dp->dp_unlinked_drain_taskq);
taskq_destroy(dp->dp_zrele_taskq);
if (dp->dp_blkstats != NULL) {
mutex_destroy(&dp->dp_blkstats->zab_lock);
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
}
kmem_free(dp, sizeof (dsl_pool_t));
}
void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t obj;
/*
* Currently, we only create the obsolete_bpobj where there are
* indirect vdevs with referenced mappings.
*/
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
/* create and open the obsolete_bpobj */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ, tx));
bpobj_free(dp->dp_meta_objset,
dp->dp_obsolete_bpobj.bpo_object, tx);
bpobj_close(&dp->dp_obsolete_bpobj);
}
dsl_pool_t *
-dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
- uint64_t txg)
+dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
+ dsl_crypto_params_t *dcp, uint64_t txg)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
objset_t *os;
#else
objset_t *os __attribute__((unused));
#endif
dsl_dataset_t *ds;
uint64_t obj;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
/* create and open the MOS (meta-objset) */
dp->dp_meta_objset = dmu_objset_create_impl(spa,
NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
spa->spa_meta_objset = dp->dp_meta_objset;
/* create the pool directory */
err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
ASSERT0(err);
/* Initialize scan structures */
VERIFY0(dsl_scan_init(dp, txg));
/* create and open the root dir */
dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir));
/* create and open the meta-objset dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
MOS_DIR_NAME, &dp->dp_mos_dir));
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
/* create and open the free dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/* create and open the free_bplist */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
dsl_pool_create_origin(dp, tx);
/*
* Some features may be needed when creating the root dataset, so we
* create the feature objects here.
*/
if (spa_version(spa) >= SPA_VERSION_FEATURES)
spa_feature_create_zap_objects(spa, tx);
if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT)
spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);
/* create the root dataset */
obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);
/* create the root objset */
VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
os = dmu_objset_create_impl(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
zfs_create_fs(os, kcred, zplprops, tx);
#endif
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dmu_tx_commit(tx);
rrw_exit(&dp->dp_config_rwlock, FTAG);
return (dp);
}
/*
* Account for the meta-objset space in its placeholder dsl_dir.
*/
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp)
{
ASSERT3U(comp, ==, uncomp); /* it's all metadata */
mutex_enter(&dp->dp_lock);
dp->dp_mos_used_delta += used;
dp->dp_mos_compressed_delta += comp;
dp->dp_mos_uncompressed_delta += uncomp;
mutex_exit(&dp->dp_lock);
}
static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dmu_objset_sync(dp->dp_meta_objset, zio, tx);
VERIFY0(zio_wait(zio));
dmu_objset_sync_done(dp->dp_meta_objset, tx);
taskq_wait(dp->dp_sync_taskq);
multilist_destroy(&dp->dp_meta_objset->os_synced_dnodes);
dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}
static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
ASSERT(MUTEX_HELD(&dp->dp_lock));
if (delta < 0)
ASSERT3U(-delta, <=, dp->dp_dirty_total);
dp->dp_dirty_total += delta;
/*
* Note: we signal even when increasing dp_dirty_total.
* This ensures forward progress -- each thread wakes the next waiter.
*/
if (dp->dp_dirty_total < zfs_dirty_data_max)
cv_signal(&dp->dp_spaceavail_cv);
}
#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
spa_t *spa = dp->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
txg_list_t *tl = &vd->vdev_ms_list;
metaslab_t *ms;
for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
VERIFY(range_tree_is_empty(ms->ms_freeing));
VERIFY(range_tree_is_empty(ms->ms_checkpointing));
}
}
return (B_TRUE);
}
#endif
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
zio_t *zio;
dmu_tx_t *tx;
dsl_dir_t *dd;
dsl_dataset_t *ds;
objset_t *mos = dp->dp_meta_objset;
list_t synced_datasets;
list_create(&synced_datasets, sizeof (dsl_dataset_t),
offsetof(dsl_dataset_t, ds_synced_link));
tx = dmu_tx_create_assigned(dp, txg);
/*
* Run all early sync tasks before writing out any dirty blocks.
* For more info on early sync tasks see block comment in
* dsl_early_sync_task().
*/
if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
dsl_sync_task_t *dst;
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst =
txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
ASSERT(dsl_early_sync_task_verify(dp, txg));
dsl_sync_task_sync(dst, tx);
}
ASSERT(dsl_early_sync_task_verify(dp, txg));
}
/*
* Write out all dirty blocks of dirty datasets.
*/
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
/*
* We must not sync any non-MOS datasets twice, because
* we may have taken a snapshot of them. However, we
* may sync newly-created datasets on pass 2.
*/
ASSERT(!list_link_active(&ds->ds_synced_link));
list_insert_tail(&synced_datasets, ds);
dsl_dataset_sync(ds, zio, tx);
}
VERIFY0(zio_wait(zio));
/*
* Update the long range free counter after
* we're done syncing user data
*/
mutex_enter(&dp->dp_lock);
ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
mutex_exit(&dp->dp_lock);
/*
* After the data blocks have been written (ensured by the zio_wait()
* above), update the user/group/project space accounting. This happens
* in tasks dispatched to dp_sync_taskq, so wait for them before
* continuing.
*/
for (ds = list_head(&synced_datasets); ds != NULL;
ds = list_next(&synced_datasets, ds)) {
dmu_objset_sync_done(ds->ds_objset, tx);
}
taskq_wait(dp->dp_sync_taskq);
/*
* Sync the datasets again to push out the changes due to
* userspace updates. This must be done before we process the
* sync tasks, so that any snapshots will have the correct
* user accounting information (and we won't get confused
* about which blocks are part of the snapshot).
*/
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
objset_t *os = ds->ds_objset;
ASSERT(list_link_active(&ds->ds_synced_link));
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, zio, tx);
/*
* Release any key mappings created by calls to
* dsl_dataset_dirty() from the userquota accounting
* code paths.
*/
if (os->os_encrypted && !os->os_raw_receive &&
!os->os_next_write_raw[txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
}
}
VERIFY0(zio_wait(zio));
/*
* Now that the datasets have been completely synced, we can
* clean up our in-memory structures accumulated while syncing:
*
* - move dead blocks from the pending deadlist and livelists
* to the on-disk versions
* - release hold from dsl_dataset_dirty()
* - release key mapping hold from dsl_dataset_dirty()
*/
while ((ds = list_remove_head(&synced_datasets)) != NULL) {
objset_t *os = ds->ds_objset;
if (os->os_encrypted && !os->os_raw_receive &&
!os->os_next_write_raw[txg & TXG_MASK]) {
ASSERT3P(ds->ds_key_mapping, !=, NULL);
key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
}
dsl_dataset_sync_done(ds, tx);
}
while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
dsl_dir_sync(dd, tx);
}
/*
* The MOS's space is accounted for in the pool/$MOS
* (dp_mos_dir). We can't modify the mos while we're syncing
* it, so we remember the deltas and apply them here.
*/
if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
dp->dp_mos_uncompressed_delta != 0) {
dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
dp->dp_mos_used_delta,
dp->dp_mos_compressed_delta,
dp->dp_mos_uncompressed_delta, tx);
dp->dp_mos_used_delta = 0;
dp->dp_mos_compressed_delta = 0;
dp->dp_mos_uncompressed_delta = 0;
}
if (dmu_objset_is_dirty(mos, txg)) {
dsl_pool_sync_mos(dp, tx);
}
/*
* We have written all of the accounted dirty data, so our
* dp_space_towrite should now be zero. However, some seldom-used
* code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
* the accounting of any dirtied space now.
*
* Note that, besides any dirty data from datasets, the amount of
* dirty data in the MOS is also accounted by the pool. Therefore,
* we want to do this cleanup after dsl_pool_sync_mos() so we don't
* attempt to update the accounting for the same dirty data twice.
* (i.e. at this point we only update the accounting for the space
* that we know that we "leaked").
*/
dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
/*
* If we modify a dataset in the same txg that we want to destroy it,
* its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
* dsl_dir_destroy_check() will fail if there are unexpected holds.
* Therefore, we want to sync the MOS (thus syncing the dd_dbuf
* and clearing the hold on it) before we process the sync_tasks.
* The MOS data dirtied by the sync_tasks will be synced on the next
* pass.
*/
if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
dsl_sync_task_t *dst;
/*
* No more sync tasks should have been added while we
* were syncing.
*/
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
dsl_sync_task_sync(dst, tx);
}
dmu_tx_commit(tx);
DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}
void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
zilog_t *zilog;
while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
/*
* We don't remove the zilog from the dp_dirty_zilogs
* list until after we've cleaned it. This ensures that
* callers of zilog_is_dirty() receive an accurate
* answer when they are racing with the spa sync thread.
*/
zil_clean(zilog, txg);
(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
dmu_buf_rele(ds->ds_dbuf, zilog);
}
ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}
/*
* TRUE if the current thread is the tx_sync_thread or if we
* are being called from SPA context during pool initialization.
*/
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
return (curthread == dp->dp_tx.tx_sync_thread ||
spa_is_initializing(dp->dp_spa) ||
taskq_member(dp->dp_sync_taskq, curthread));
}
/*
* This function returns the amount of allocatable space in the pool
* minus whatever space is currently reserved by ZFS for specific
* purposes. Specifically:
*
* 1] Any reserved SLOP space
* 2] Any space used by the checkpoint
* 3] Any space used for deferred frees
*
* The latter two are especially important because they are needed to
* rectify the SPA's and DMU's different understanding of how much space
* is used. This way the DMU is aware of that extra space tracked by the
* SPA without having to maintain a separate special dir (e.g. similar to
* $MOS, $FREEING, and $LEAKED).
*
* Note: By deferred frees here, we mean the frees that were deferred
* in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
* segments placed in ms_defer trees during metaslab_sync_done().
*/
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
spa_t *spa = dp->dp_spa;
uint64_t space, resv, adjustedsize;
uint64_t spa_deferred_frees =
spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;
space = spa_get_dspace(spa)
- spa_get_checkpoint_space(spa) - spa_deferred_frees;
resv = spa_get_slop_space(spa);
switch (slop_policy) {
case ZFS_SPACE_CHECK_NORMAL:
break;
case ZFS_SPACE_CHECK_RESERVED:
resv >>= 1;
break;
case ZFS_SPACE_CHECK_EXTRA_RESERVED:
resv >>= 2;
break;
case ZFS_SPACE_CHECK_NONE:
resv = 0;
break;
default:
panic("invalid slop policy value: %d", slop_policy);
break;
}
adjustedsize = (space >= resv) ? (space - resv) : 0;
return (adjustedsize);
}
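To make the slop arithmetic above concrete, here is a minimal standalone sketch (plain userland C, not ZFS code) of how the reservation is scaled per space-check policy and then subtracted; the space and slop figures are invented for illustration, and the real inputs come from spa_get_dspace(), spa_get_checkpoint_space() and spa_get_slop_space().

/* Standalone illustration of the slop scaling above (not part of ZFS). */
#include <stdint.h>
#include <stdio.h>

enum slop_policy { CHECK_NORMAL, CHECK_RESERVED, CHECK_EXTRA_RESERVED, CHECK_NONE };

static uint64_t
adjusted_size(uint64_t space, uint64_t slop, enum slop_policy p)
{
        uint64_t resv = slop;

        switch (p) {
        case CHECK_NORMAL:         break;            /* full slop reserved */
        case CHECK_RESERVED:       resv >>= 1; break; /* half of the slop */
        case CHECK_EXTRA_RESERVED: resv >>= 2; break; /* quarter of the slop */
        case CHECK_NONE:           resv = 0;   break; /* ignore slop entirely */
        }
        return (space >= resv ? space - resv : 0);
}

int
main(void)
{
        uint64_t space = 100ULL << 30;  /* hypothetical: 100 GiB after checkpoint/deferred */
        uint64_t slop = 3200ULL << 20;  /* hypothetical: ~3.1 GiB of slop */

        printf("normal:   %llu\n", (unsigned long long)adjusted_size(space, slop, CHECK_NORMAL));
        printf("reserved: %llu\n", (unsigned long long)adjusted_size(space, slop, CHECK_RESERVED));
        return (0);
}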
uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
uint64_t deferred =
metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
return (quota);
}
+uint64_t
+dsl_pool_deferred_space(dsl_pool_t *dp)
+{
+ return (metaslab_class_get_deferred(spa_normal_class(dp->dp_spa)));
+}
+
boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
uint64_t dirty_min_bytes =
zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
uint64_t dirty;
mutex_enter(&dp->dp_lock);
dirty = dp->dp_dirty_total;
mutex_exit(&dp->dp_lock);
if (dirty > dirty_min_bytes)
txg_kick(dp);
return (dirty > delay_min_bytes);
}
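A short standalone arithmetic sketch of the two thresholds computed above; the tunable values used here (4 GiB dirty max, 60% delay threshold, 20% sync threshold) are assumptions for illustration only.

/* Standalone sketch of the dirty-data thresholds (values are assumptions). */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t dirty_data_max = 4ULL << 30;   /* assume zfs_dirty_data_max = 4 GiB */
        int delay_min_pct = 60;                 /* assume zfs_delay_min_dirty_percent */
        int sync_pct = 20;                      /* assume zfs_dirty_data_sync_percent */

        uint64_t delay_min_bytes = dirty_data_max * delay_min_pct / 100; /* ~2.4 GiB */
        uint64_t dirty_min_bytes = dirty_data_max * sync_pct / 100;      /* ~0.8 GiB */

        uint64_t dirty = 1ULL << 30;            /* 1 GiB of outstanding dirty data */

        /* Past the sync threshold: a txg would be kicked off early. */
        printf("kick txg:    %d\n", dirty > dirty_min_bytes);
        /* Still below the delay threshold: writers are not throttled yet. */
        printf("delay write: %d\n", dirty > delay_min_bytes);
        return (0);
}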
void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
if (space > 0) {
mutex_enter(&dp->dp_lock);
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
dsl_pool_dirty_delta(dp, space);
mutex_exit(&dp->dp_lock);
}
}
void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
ASSERT3S(space, >=, 0);
if (space == 0)
return;
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
/* XXX writing something we didn't dirty? */
space = dp->dp_dirty_pertxg[txg & TXG_MASK];
}
ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
ASSERT3U(dp->dp_dirty_total, >=, space);
dsl_pool_dirty_delta(dp, -space);
mutex_exit(&dp->dp_lock);
}
/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
dmu_tx_t *tx = arg;
dsl_dataset_t *ds, *prev = NULL;
int err;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
break;
dsl_dataset_rele(ds, FTAG);
ds = prev;
prev = NULL;
}
if (prev == NULL) {
prev = dp->dp_origin_snap;
/*
* The $ORIGIN can't have any data, or the accounting
* will be wrong.
*/
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/* The origin doesn't get attached to itself */
if (ds->ds_object == prev->ds_object) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
dsl_dataset_phys(ds)->ds_prev_snap_txg =
dsl_dataset_phys(prev)->ds_creation_txg;
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_num_children++;
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
ASSERT(ds->ds_prev == NULL);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev));
}
}
ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);
if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_next_clones_obj =
zap_create(dp->dp_meta_objset,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
dsl_dataset_rele(ds, FTAG);
if (prev != dp->dp_origin_snap)
dsl_dataset_rele(prev, FTAG);
return (0);
}
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap != NULL);
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
dmu_tx_t *tx = arg;
objset_t *mos = dp->dp_meta_objset;
if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
dsl_dataset_t *origin;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(origin->ds_dir)->dd_clones,
ds->ds_object, tx));
dsl_dataset_rele(origin, FTAG);
}
return (0);
}
void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t obj;
ASSERT(dmu_tx_is_syncing(tx));
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/*
* We can't use bpobj_alloc(), because spa_version() still
* returns the old version, and we need a new-version bpobj with
* subobj support. So call dmu_object_alloc() directly.
*/
obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t dsobj;
dsl_dataset_t *ds;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap == NULL);
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
/* create the origin dir, ds, & snap-ds */
dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
NULL, 0, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
dp, &dp->dp_origin_snap));
dsl_dataset_rele(ds, FTAG);
}
taskq_t *
dsl_pool_zrele_taskq(dsl_pool_t *dp)
{
return (dp->dp_zrele_taskq);
}
taskq_t *
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
{
return (dp->dp_unlinked_drain_taskq);
}
/*
* Walk through the pool-wide zap object of temporary snapshot user holds
* and release them.
*/
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
zap_attribute_t za;
zap_cursor_t zc;
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
nvlist_t *holds;
if (zapobj == 0)
return;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
holds = fnvlist_alloc();
for (zap_cursor_init(&zc, mos, zapobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
char *htag;
nvlist_t *tags;
htag = strchr(za.za_name, '-');
*htag = '\0';
++htag;
if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
tags = fnvlist_alloc();
fnvlist_add_boolean(tags, htag);
fnvlist_add_nvlist(holds, za.za_name, tags);
fnvlist_free(tags);
} else {
fnvlist_add_boolean(tags, htag);
}
}
dsl_dataset_user_release_tmp(dp, holds);
fnvlist_free(holds);
zap_cursor_fini(&zc);
}
/*
* Create the pool-wide zap object for storing temporary snapshot holds.
*/
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
ASSERT(dp->dp_tmp_userrefs_obj == 0);
ASSERT(dmu_tx_is_syncing(tx));
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}
static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
char *name;
int error;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
ASSERT(dmu_tx_is_syncing(tx));
/*
* If the pool was created prior to SPA_VERSION_USERREFS, the
* zap object for temporary holds might not exist yet.
*/
if (zapobj == 0) {
if (holding) {
dsl_pool_user_hold_create_obj(dp, tx);
zapobj = dp->dp_tmp_userrefs_obj;
} else {
return (SET_ERROR(ENOENT));
}
}
name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
if (holding)
error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
else
error = zap_remove(mos, zapobj, name, tx);
kmem_strfree(name);
return (error);
}
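The hold path above builds its ZAP keys as "<dsobj-in-hex>-<tag>" via kmem_asprintf("%llx-%s", ...), and dsl_pool_clean_tmp_userrefs() splits them back apart at the first '-'. A small userland sketch of that round trip, with invented values:

/* Userland sketch of the temporary-hold ZAP key format (invented values). */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        uint64_t dsobj = 0x1a2bULL;
        const char *tag = ".send-hold-123";     /* tags may themselves contain '-' */
        char name[64];

        /* Mirrors kmem_asprintf("%llx-%s", dsobj, tag) in the hold path. */
        (void) snprintf(name, sizeof (name), "%" PRIx64 "-%s", dsobj, tag);

        /*
         * Mirrors the parse in dsl_pool_clean_tmp_userrefs(): the dsobj part
         * is hex and can never contain '-', so splitting at the *first* '-'
         * always recovers the full tag, even when the tag has dashes.
         */
        char *sep = strchr(name, '-');
        *sep = '\0';
        printf("dsobj=%s tag=%s\n", name, sep + 1);
        return (0);
}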
/*
* Add a temporary hold for the given dataset object and tag.
*/
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
uint64_t now, dmu_tx_t *tx)
{
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}
/*
* Release a temporary hold for the given dataset object and tag.
*/
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
dmu_tx_t *tx)
{
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
tx, B_FALSE));
}
/*
* DSL Pool Configuration Lock
*
* The dp_config_rwlock protects against changes to DSL state (e.g. dataset
* creation / destruction / rename / property setting). It must be held for
* read to hold a dataset or dsl_dir. I.e. you must call
* dsl_pool_config_enter() or dsl_pool_hold() before calling
* dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
* must be held continuously until all datasets and dsl_dirs are released.
*
* The only exception to this rule is that if a "long hold" is placed on
* a dataset, then the dp_config_rwlock may be dropped while the dataset
* is still held. The long hold will prevent the dataset from being
* destroyed -- the destroy will fail with EBUSY. A long hold can be
* obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
* (by calling dsl_{dataset,objset}_{try}own{_obj}).
*
* Legitimate long-holders (including owners) are long-running, cancelable
* tasks that should cause "zfs destroy" to fail. This includes DMU
* consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
* "zfs send", and "zfs diff". There are several other long-holders whose
* uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
*
* The usual formula for long-holding would be:
* dsl_pool_hold()
* dsl_dataset_hold()
* ... perform checks ...
* dsl_dataset_long_hold()
* dsl_pool_rele()
* ... perform long-running task ...
* dsl_dataset_long_rele()
* dsl_dataset_rele()
*
* Note that when the long hold is released, the dataset is still held but
* the pool is not held. The dataset may change arbitrarily during this time
* (e.g. it could be destroyed). Therefore you shouldn't do anything to the
* dataset except release it.
*
* Operations generally fall somewhere into the following taxonomy:
*
*                            Read-Only          Modifying
*
* Dataset Layer / MOS        zfs get            zfs destroy
*
* Individual Dataset         read()             write()
*
*
* Dataset Layer Operations
*
* Modifying operations should generally use dsl_sync_task(). The synctask
* infrastructure enforces proper locking strategy with respect to the
* dp_config_rwlock. See the comment above dsl_sync_task() for details.
*
* Read-only operations will manually hold the pool, then the dataset, obtain
* information from the dataset, then release the pool and dataset.
* dmu_objset_{hold,rele}() are convenience routines that also do the pool
* hold/rele.
*
*
* Operations On Individual Datasets
*
* Objects _within_ an objset should only be modified by the current 'owner'
* of the objset to prevent incorrect concurrent modification. Thus, use
* {dmu_objset,dsl_dataset}_own to mark some entity as the current owner,
* and fail with EBUSY if there is already an owner. The owner can then
* implement its own locking strategy, independent of the dataset layer's
* locking infrastructure.
* (E.g., the ZPL has its own set of locks to control concurrency. A regular
* vnop will not reach into the dataset layer).
*
* Ideally, objects would also only be read by the objset's owner, so that we
* don't observe state mid-modification.
* (E.g. the ZPL is creating a new object and linking it into a directory; if
* you don't coordinate with the ZPL to hold ZPL-level locks, you could see an
* intermediate state. The ioctl level violates this but in pretty benign
* ways, e.g. reading the zpl props object.)
*/
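A sketch of the long-hold formula described in the comment above, written against the functions defined in this file. The dsl_dataset_long_hold()/dsl_dataset_long_rele() signatures (dataset plus tag) are assumed here, and the function itself is hypothetical rather than an existing consumer.

/*
 * Sketch of the long-hold formula from the comment above. The
 * dsl_dataset_long_hold()/dsl_dataset_long_rele() signatures are assumed;
 * the remaining calls match functions defined in this file.
 */
static int
example_long_running_task(const char *dsname)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(dsname, FTAG, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }
        /* ... perform checks while both pool and dataset are held ... */
        dsl_dataset_long_hold(ds, FTAG);
        dsl_pool_rele(dp, FTAG);        /* config lock dropped; destroy now fails with EBUSY */

        /* ... long-running work; the dataset may change but cannot be destroyed ... */

        dsl_dataset_long_rele(ds, FTAG);
        dsl_dataset_rele(ds, FTAG);
        return (0);
}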
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, tag);
if (error == 0) {
*dp = spa_get_dsl(spa);
dsl_pool_config_enter(*dp, tag);
}
return (error);
}
void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
dsl_pool_config_exit(dp, tag);
spa_close(dp->dp_spa, tag);
}
void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
/*
* We use a "reentrant" reader-writer lock, but not reentrantly.
*
* The rrwlock can (with the track_all flag) track all reading threads,
* which is very useful for debugging which code path failed to release
* the lock, and for verifying that the *current* thread does hold
* the lock.
*
* (Unlike a rwlock, which knows that N threads hold it for
* read, but not *which* threads, so rw_held(RW_READER) returns TRUE
* if any thread holds it for read, even if this thread doesn't).
*/
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}
void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}
void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
rrw_exit(&dp->dp_config_rwlock, tag);
}
boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}
boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);
/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD,
"Max percent of RAM allowed to be dirty");
/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD,
"zfs_dirty_data_max upper bound as % of RAM");
ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW,
"Transaction delay threshold");
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
"Determines the dirty space limit");
/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
"zfs_dirty_data_max upper bound in bytes");
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");
ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
"How quickly delay approaches infinity");
ZFS_MODULE_PARAM(zfs, zfs_, sync_taskq_batch_pct, INT, ZMOD_RW,
"Max percent of CPUs that are used to sync dirty data");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
"Max percent of CPUs that are used per dp_sync_taskq");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
"Number of taskq entries that are pre-populated");
ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
"Max number of taskq entries that are cached");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dsl_prop.c b/sys/contrib/openzfs/module/zfs/dsl_prop.c
index dfa04d7681be..0089edf8683a 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_prop.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_prop.c
@@ -1,1287 +1,1287 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include "zfs_prop.h"
#define ZPROP_INHERIT_SUFFIX "$inherit"
#define ZPROP_RECVD_SUFFIX "$recvd"
static int
dodefault(zfs_prop_t prop, int intsz, int numints, void *buf)
{
/*
* The setonce properties are read-only, BUT they still
* have a default value that can be used as the initial
* value.
*/
if (prop == ZPROP_INVAL ||
(zfs_prop_readonly(prop) && !zfs_prop_setonce(prop)))
return (SET_ERROR(ENOENT));
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
if (intsz != 1)
return (SET_ERROR(EOVERFLOW));
(void) strncpy(buf, zfs_prop_default_string(prop),
numints);
} else {
if (intsz != 8 || numints < 1)
return (SET_ERROR(EOVERFLOW));
*(uint64_t *)buf = zfs_prop_default_numeric(prop);
}
return (0);
}
int
dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
int intsz, int numints, void *buf, char *setpoint, boolean_t snapshot)
{
int err;
dsl_dir_t *target = dd;
objset_t *mos = dd->dd_pool->dp_meta_objset;
zfs_prop_t prop;
boolean_t inheritable;
boolean_t inheriting = B_FALSE;
char *inheritstr;
char *recvdstr;
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (setpoint)
setpoint[0] = '\0';
prop = zfs_name_to_prop(propname);
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
/*
* Note: dd may become NULL, therefore we shouldn't dereference it
* after this loop.
*/
for (; dd != NULL; dd = dd->dd_parent) {
if (dd != target || snapshot) {
if (!inheritable) {
err = SET_ERROR(ENOENT);
break;
}
inheriting = B_TRUE;
}
/* Check for a local value. */
err = zap_lookup(mos, dsl_dir_phys(dd)->dd_props_zapobj,
propname, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
dsl_dir_name(dd, setpoint);
break;
}
/*
* Skip the check for a received value if there is an explicit
* inheritance entry.
*/
err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
inheritstr);
if (err != 0 && err != ENOENT)
break;
if (err == ENOENT) {
/* Check for a received value. */
err = zap_lookup(mos, dsl_dir_phys(dd)->dd_props_zapobj,
recvdstr, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0) {
if (inheriting) {
dsl_dir_name(dd, setpoint);
} else {
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
}
}
break;
}
}
/*
* If we found an explicit inheritance entry, err is zero even
* though we haven't yet found the value, so reinitializing err
* at the end of the loop (instead of at the beginning) ensures
* that err has a valid post-loop value.
*/
err = SET_ERROR(ENOENT);
}
if (err == ENOENT)
err = dodefault(prop, intsz, numints, buf);
kmem_strfree(inheritstr);
kmem_strfree(recvdstr);
return (err);
}
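A standalone sketch of the three ZAP key names consulted per property and the resolution order implemented above; "compression" is just an example property and the helper is illustrative userland C.

/* Standalone sketch of the per-property ZAP keys and lookup order (illustrative). */
#include <stdio.h>

int
main(void)
{
        const char *propname = "compression";
        char local[64], inherit[64], recvd[64];

        (void) snprintf(local, sizeof (local), "%s", propname);             /* local value */
        (void) snprintf(inherit, sizeof (inherit), "%s$inherit", propname); /* "zfs inherit -S" marker */
        (void) snprintf(recvd, sizeof (recvd), "%s$recvd", propname);       /* received ("zfs receive") value */

        /*
         * Per dsl_dir, the resolution order above is:
         *   1. local value              (key "compression")
         *   2. if "compression$inherit" exists, skip the received value
         *   3. received value           (key "compression$recvd")
         * then repeat on the parent dir, and finally fall back to the
         * property's default via dodefault().
         */
        printf("%s / %s / %s\n", local, inherit, recvd);
        return (0);
}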
int
dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname,
int intsz, int numints, void *buf, char *setpoint)
{
zfs_prop_t prop = zfs_name_to_prop(propname);
boolean_t inheritable;
uint64_t zapobj;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
zapobj = dsl_dataset_phys(ds)->ds_props_obj;
if (zapobj != 0) {
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
int err;
ASSERT(ds->ds_is_snapshot);
/* Check for a local value. */
err = zap_lookup(mos, zapobj, propname, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
dsl_dataset_name(ds, setpoint);
return (err);
}
/*
* Skip the check for a received value if there is an explicit
* inheritance entry.
*/
if (inheritable) {
char *inheritstr = kmem_asprintf("%s%s", propname,
ZPROP_INHERIT_SUFFIX);
err = zap_contains(mos, zapobj, inheritstr);
kmem_strfree(inheritstr);
if (err != 0 && err != ENOENT)
return (err);
}
if (err == ENOENT) {
/* Check for a received value. */
char *recvdstr = kmem_asprintf("%s%s", propname,
ZPROP_RECVD_SUFFIX);
err = zap_lookup(mos, zapobj, recvdstr,
intsz, numints, buf);
kmem_strfree(recvdstr);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
return (err);
}
}
}
return (dsl_prop_get_dd(ds->ds_dir, propname,
intsz, numints, buf, setpoint, ds->ds_is_snapshot));
}
static dsl_prop_record_t *
dsl_prop_record_find(dsl_dir_t *dd, const char *propname)
{
dsl_prop_record_t *pr = NULL;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (pr = list_head(&dd->dd_props);
pr != NULL; pr = list_next(&dd->dd_props, pr)) {
if (strcmp(pr->pr_propname, propname) == 0)
break;
}
return (pr);
}
static dsl_prop_record_t *
dsl_prop_record_create(dsl_dir_t *dd, const char *propname)
{
dsl_prop_record_t *pr;
ASSERT(MUTEX_HELD(&dd->dd_lock));
pr = kmem_alloc(sizeof (dsl_prop_record_t), KM_SLEEP);
pr->pr_propname = spa_strdup(propname);
list_create(&pr->pr_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_pr_node));
list_insert_head(&dd->dd_props, pr);
return (pr);
}
void
dsl_prop_init(dsl_dir_t *dd)
{
list_create(&dd->dd_props, sizeof (dsl_prop_record_t),
offsetof(dsl_prop_record_t, pr_node));
}
void
dsl_prop_fini(dsl_dir_t *dd)
{
dsl_prop_record_t *pr;
while ((pr = list_remove_head(&dd->dd_props)) != NULL) {
list_destroy(&pr->pr_cbs);
spa_strfree((char *)pr->pr_propname);
kmem_free(pr, sizeof (dsl_prop_record_t));
}
list_destroy(&dd->dd_props);
}
/*
* Register interest in the named property. We'll call the callback
* once to notify it of the current property value, and again each time
* the property changes, until this callback is unregistered.
*
* Return 0 on success, errno if the prop is not an integer value.
*/
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
dsl_prop_changed_cb_t *callback, void *cbarg)
{
dsl_dir_t *dd = ds->ds_dir;
uint64_t value;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
int err;
dsl_pool_t *dp __maybe_unused = dd->dd_pool;
ASSERT(dsl_pool_config_held(dp));
err = dsl_prop_get_int_ds(ds, propname, &value);
if (err != 0)
return (err);
cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
cbr->cbr_ds = ds;
cbr->cbr_func = callback;
cbr->cbr_arg = cbarg;
mutex_enter(&dd->dd_lock);
pr = dsl_prop_record_find(dd, propname);
if (pr == NULL)
pr = dsl_prop_record_create(dd, propname);
cbr->cbr_pr = pr;
list_insert_head(&pr->pr_cbs, cbr);
list_insert_head(&ds->ds_prop_cbs, cbr);
mutex_exit(&dd->dd_lock);
cbr->cbr_func(cbr->cbr_arg, value);
return (0);
}
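A hedged sketch of a dsl_prop_register() consumer. The callback shape (arg plus new integer value) is inferred from how cbr_func is invoked above; my_state_t, my_recordsize_changed() and my_register() are hypothetical names, not existing code.

/*
 * Sketch of a dsl_prop_register() consumer. The callback takes the
 * registered arg and the new integer value, mirroring how cbr_func is
 * called above; all "my_" names are hypothetical.
 */
typedef struct my_state {
        uint64_t ms_recordsize;         /* cached copy, updated by the callback */
} my_state_t;

static void
my_recordsize_changed(void *arg, uint64_t newval)
{
        my_state_t *ms = arg;

        ms->ms_recordsize = newval;     /* runs once at registration, then on every change */
}

/* Caller must hold the pool configuration lock (see the ASSERT above). */
static int
my_register(dsl_dataset_t *ds, my_state_t *ms)
{
        return (dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
            my_recordsize_changed, ms));
}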
int
dsl_prop_get(const char *dsname, const char *propname,
int intsz, int numints, void *buf, char *setpoint)
{
objset_t *os;
int error;
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = dsl_prop_get_ds(dmu_objset_ds(os), propname,
intsz, numints, buf, setpoint);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* Get the current property value. It may have changed by the time this
* function returns, so it is NOT safe to follow up with
* dsl_prop_register() and assume that the value has not changed in
* between.
*
* Return 0 on success, ENOENT if ddname is invalid.
*/
int
dsl_prop_get_integer(const char *ddname, const char *propname,
uint64_t *valuep, char *setpoint)
{
return (dsl_prop_get(ddname, propname, 8, 1, valuep, setpoint));
}
int
dsl_prop_get_int_ds(dsl_dataset_t *ds, const char *propname,
uint64_t *valuep)
{
return (dsl_prop_get_ds(ds, propname, 8, 1, valuep, NULL));
}
/*
* Predict the effective value of the given special property if it were set with
* the given value and source. This is not a general purpose function. It exists
* only to handle the special requirements of the quota and reservation
* properties. The fact that these properties are non-inheritable greatly
* simplifies the prediction logic.
*
* Returns 0 on success, a positive error code on failure, or -1 if called with
* a property not handled by this function.
*/
int
dsl_prop_predict(dsl_dir_t *dd, const char *propname,
zprop_source_t source, uint64_t value, uint64_t *newvalp)
{
zfs_prop_t prop = zfs_name_to_prop(propname);
objset_t *mos;
uint64_t zapobj;
uint64_t version;
char *recvdstr;
int err = 0;
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_REFRESERVATION:
break;
default:
return (-1);
}
mos = dd->dd_pool->dp_meta_objset;
zapobj = dsl_dir_phys(dd)->dd_props_zapobj;
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
version = spa_version(dd->dd_pool->dp_spa);
if (version < SPA_VERSION_RECVD_PROPS) {
if (source & ZPROP_SRC_NONE)
source = ZPROP_SRC_NONE;
else if (source & ZPROP_SRC_RECEIVED)
source = ZPROP_SRC_LOCAL;
}
switch ((int)source) {
case ZPROP_SRC_NONE:
/* Revert to the received value, if any. */
err = zap_lookup(mos, zapobj, recvdstr, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = 0;
break;
case ZPROP_SRC_LOCAL:
*newvalp = value;
break;
case ZPROP_SRC_RECEIVED:
/*
* If there's no local setting, then the new received value will
* be the effective value.
*/
err = zap_lookup(mos, zapobj, propname, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = value;
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED):
/*
* We're clearing the received value, so the local setting (if
* it exists) remains the effective value.
*/
err = zap_lookup(mos, zapobj, propname, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = 0;
break;
default:
panic("unexpected property source: %d", source);
}
kmem_strfree(recvdstr);
if (err == ENOENT)
return (0);
return (err);
}
/*
* Unregister this callback. Return 0 on success, ENOENT if ddname is
* invalid, or ENOMSG if no matching callback registered.
*
* NOTE: This function is no longer used internally but has been preserved
* to prevent breaking external consumers (Lustre, etc).
*/
int
dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
dsl_prop_changed_cb_t *callback, void *cbarg)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_cb_record_t *cbr;
mutex_enter(&dd->dd_lock);
for (cbr = list_head(&ds->ds_prop_cbs);
cbr; cbr = list_next(&ds->ds_prop_cbs, cbr)) {
if (cbr->cbr_ds == ds &&
cbr->cbr_func == callback &&
cbr->cbr_arg == cbarg &&
strcmp(cbr->cbr_pr->pr_propname, propname) == 0)
break;
}
if (cbr == NULL) {
mutex_exit(&dd->dd_lock);
return (SET_ERROR(ENOMSG));
}
list_remove(&ds->ds_prop_cbs, cbr);
list_remove(&cbr->cbr_pr->pr_cbs, cbr);
mutex_exit(&dd->dd_lock);
kmem_free(cbr, sizeof (dsl_prop_cb_record_t));
return (0);
}
/*
* Unregister all callbacks that are registered with the
* given callback argument.
*/
void
dsl_prop_unregister_all(dsl_dataset_t *ds, void *cbarg)
{
dsl_prop_cb_record_t *cbr, *next_cbr;
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
next_cbr = list_head(&ds->ds_prop_cbs);
while (next_cbr != NULL) {
cbr = next_cbr;
next_cbr = list_next(&ds->ds_prop_cbs, cbr);
if (cbr->cbr_arg == cbarg) {
list_remove(&ds->ds_prop_cbs, cbr);
list_remove(&cbr->cbr_pr->pr_cbs, cbr);
kmem_free(cbr, sizeof (dsl_prop_cb_record_t));
}
}
mutex_exit(&dd->dd_lock);
}
boolean_t
dsl_prop_hascb(dsl_dataset_t *ds)
{
return (!list_is_empty(&ds->ds_prop_cbs));
}
-/* ARGSUSED */
static int
dsl_prop_notify_all_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
+ (void) arg;
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
mutex_enter(&dd->dd_lock);
for (pr = list_head(&dd->dd_props);
pr; pr = list_next(&dd->dd_props, pr)) {
for (cbr = list_head(&pr->pr_cbs); cbr;
cbr = list_next(&pr->pr_cbs, cbr)) {
uint64_t value;
/*
* Callback entries do not have holds on their
* datasets so that datasets with registered
* callbacks are still eligible for eviction.
* Unlike operations to update properties on a
* single dataset, we are performing a recursive
* descent of related head datasets. The caller
* of this function only has a dataset hold on
* the passed in head dataset, not the snapshots
* associated with this dataset. Without a hold,
* the dataset pointer within callback records
* for snapshots can be invalidated by eviction
* at any time.
*
* Use dsl_dataset_try_add_ref() to verify
* that the dataset for a snapshot has not
* begun eviction processing and to prevent
* eviction from occurring for the duration of
* the callback. If the hold attempt fails,
* this object is already being evicted and the
* callback can be safely ignored.
*/
if (ds != cbr->cbr_ds &&
!dsl_dataset_try_add_ref(dp, cbr->cbr_ds, FTAG))
continue;
if (dsl_prop_get_ds(cbr->cbr_ds,
cbr->cbr_pr->pr_propname, sizeof (value), 1,
&value, NULL) == 0)
cbr->cbr_func(cbr->cbr_arg, value);
if (ds != cbr->cbr_ds)
dsl_dataset_rele(cbr->cbr_ds, FTAG);
}
}
mutex_exit(&dd->dd_lock);
return (0);
}
/*
* Update all property values for ddobj & its descendants. This is used
* when renaming the dir.
*/
void
dsl_prop_notify_all(dsl_dir_t *dd)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
(void) dmu_objset_find_dp(dp, dd->dd_object, dsl_prop_notify_all_cb,
NULL, DS_FIND_CHILDREN);
}
static void
dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj,
const char *propname, uint64_t value, int first)
{
dsl_dir_t *dd;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
objset_t *mos = dp->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t *za;
int err;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
if (err)
return;
if (!first) {
/*
* If the prop is set here, then this change is not
* being inherited here or below; stop the recursion.
*/
err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
propname);
if (err == 0) {
dsl_dir_rele(dd, FTAG);
return;
}
ASSERT3U(err, ==, ENOENT);
}
mutex_enter(&dd->dd_lock);
pr = dsl_prop_record_find(dd, propname);
if (pr != NULL) {
for (cbr = list_head(&pr->pr_cbs); cbr;
cbr = list_next(&pr->pr_cbs, cbr)) {
uint64_t propobj;
/*
* cbr->cbr_ds may be invalidated due to eviction,
* requiring the use of dsl_dataset_try_add_ref().
* See comment block in dsl_prop_notify_all_cb()
* for details.
*/
if (!dsl_dataset_try_add_ref(dp, cbr->cbr_ds, FTAG))
continue;
propobj = dsl_dataset_phys(cbr->cbr_ds)->ds_props_obj;
/*
* If the property is not set on this ds, then it is
* inherited here; call the callback.
*/
if (propobj == 0 ||
zap_contains(mos, propobj, propname) != 0)
cbr->cbr_func(cbr->cbr_arg, value);
dsl_dataset_rele(cbr->cbr_ds, FTAG);
}
}
mutex_exit(&dd->dd_lock);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
for (zap_cursor_init(&zc, mos,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
dsl_prop_changed_notify(dp, za->za_first_integer,
propname, value, FALSE);
}
kmem_free(za, sizeof (zap_attribute_t));
zap_cursor_fini(&zc);
dsl_dir_rele(dd, FTAG);
}
void
dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
zprop_source_t source, int intsz, int numints, const void *value,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t zapobj, intval, dummy, count;
int isint;
char valbuf[32];
const char *valstr = NULL;
char *inheritstr;
char *recvdstr;
char *tbuf = NULL;
int err;
uint64_t version = spa_version(ds->ds_dir->dd_pool->dp_spa);
isint = (dodefault(zfs_name_to_prop(propname), 8, 1, &intval) == 0);
if (ds->ds_is_snapshot) {
ASSERT(version >= SPA_VERSION_SNAP_PROPS);
if (dsl_dataset_phys(ds)->ds_props_obj == 0 &&
(source & ZPROP_SRC_NONE) == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_props_obj =
zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
}
zapobj = dsl_dataset_phys(ds)->ds_props_obj;
} else {
zapobj = dsl_dir_phys(ds->ds_dir)->dd_props_zapobj;
}
/* If we are removing objects from a non-existent ZAP just return */
if (zapobj == 0)
return;
if (version < SPA_VERSION_RECVD_PROPS) {
if (source & ZPROP_SRC_NONE)
source = ZPROP_SRC_NONE;
else if (source & ZPROP_SRC_RECEIVED)
source = ZPROP_SRC_LOCAL;
}
inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
switch ((int)source) {
case ZPROP_SRC_NONE:
/*
* revert to received value, if any (inherit -S)
* - remove propname
* - remove propname$inherit
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
break;
case ZPROP_SRC_LOCAL:
/*
* remove propname$inherit
* set propname -> value
*/
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
VERIFY0(zap_update(mos, zapobj, propname,
intsz, numints, value, tx));
break;
case ZPROP_SRC_INHERITED:
/*
* explicitly inherit
* - remove propname
* - set propname$inherit
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
if (version >= SPA_VERSION_RECVD_PROPS &&
dsl_prop_get_int_ds(ds, ZPROP_HAS_RECVD, &dummy) == 0) {
dummy = 0;
VERIFY0(zap_update(mos, zapobj, inheritstr,
8, 1, &dummy, tx));
}
break;
case ZPROP_SRC_RECEIVED:
/*
* set propname$recvd -> value
*/
err = zap_update(mos, zapobj, recvdstr,
intsz, numints, value, tx);
ASSERT(err == 0);
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED):
/*
* clear local and received settings
* - remove propname
* - remove propname$inherit
* - remove propname$recvd
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
fallthrough;
case (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED):
/*
* remove propname$recvd
*/
err = zap_remove(mos, zapobj, recvdstr, tx);
ASSERT(err == 0 || err == ENOENT);
break;
default:
cmn_err(CE_PANIC, "unexpected property source: %d", source);
}
kmem_strfree(inheritstr);
kmem_strfree(recvdstr);
/*
* If we are left with an empty snap zap we can destroy it.
* This will prevent unnecessary calls to zap_lookup() in
* the "zfs list" and "zfs get" code paths.
*/
if (ds->ds_is_snapshot &&
zap_count(mos, zapobj, &count) == 0 && count == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_props_obj = 0;
zap_destroy(mos, zapobj, tx);
}
if (isint) {
VERIFY0(dsl_prop_get_int_ds(ds, propname, &intval));
if (ds->ds_is_snapshot) {
dsl_prop_cb_record_t *cbr;
/*
* It's a snapshot; nothing can inherit this
* property, so just look for callbacks on this
* ds here.
*/
mutex_enter(&ds->ds_dir->dd_lock);
for (cbr = list_head(&ds->ds_prop_cbs); cbr;
cbr = list_next(&ds->ds_prop_cbs, cbr)) {
if (strcmp(cbr->cbr_pr->pr_propname,
propname) == 0)
cbr->cbr_func(cbr->cbr_arg, intval);
}
mutex_exit(&ds->ds_dir->dd_lock);
} else {
dsl_prop_changed_notify(ds->ds_dir->dd_pool,
ds->ds_dir->dd_object, propname, intval, TRUE);
}
(void) snprintf(valbuf, sizeof (valbuf),
"%lld", (longlong_t)intval);
valstr = valbuf;
} else {
if (source == ZPROP_SRC_LOCAL) {
valstr = value;
} else {
tbuf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
if (dsl_prop_get_ds(ds, propname, 1,
ZAP_MAXVALUELEN, tbuf, NULL) == 0)
valstr = tbuf;
}
}
spa_history_log_internal_ds(ds, (source == ZPROP_SRC_NONE ||
source == ZPROP_SRC_INHERITED) ? "inherit" : "set", tx,
"%s=%s", propname, (valstr == NULL ? "" : valstr));
if (tbuf != NULL)
kmem_free(tbuf, ZAP_MAXVALUELEN);
}
int
dsl_prop_set_int(const char *dsname, const char *propname,
zprop_source_t source, uint64_t value)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_uint64(nvl, propname, value);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_prop_set_string(const char *dsname, const char *propname,
zprop_source_t source, const char *value)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_string(nvl, propname, value);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_prop_inherit(const char *dsname, const char *propname,
zprop_source_t source)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_boolean(nvl, propname);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_props_set_check(void *arg, dmu_tx_t *tx)
{
dsl_props_set_arg_t *dpsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t version;
nvpair_t *elem = NULL;
int err;
err = dsl_dataset_hold(dp, dpsa->dpsa_dsname, FTAG, &ds);
if (err != 0)
return (err);
version = spa_version(ds->ds_dir->dd_pool->dp_spa);
while ((elem = nvlist_next_nvpair(dpsa->dpsa_props, elem)) != NULL) {
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
char *valstr = fnvpair_value_string(elem);
if (strlen(valstr) >= (version <
SPA_VERSION_STMF_PROP ?
ZAP_OLDMAXVALUELEN : ZAP_MAXVALUELEN)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(E2BIG));
}
}
}
if (ds->ds_is_snapshot && version < SPA_VERSION_SNAP_PROPS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_props_set_sync_impl(dsl_dataset_t *ds, zprop_source_t source,
nvlist_t *props, dmu_tx_t *tx)
{
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
nvpair_t *pair = elem;
const char *name = nvpair_name(pair);
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
/*
* This usually happens when we reuse the nvlist_t data
* returned by the counterpart dsl_prop_get_all_impl().
* For instance we do this to restore the original
* received properties when an error occurs in the
* zfs_ioc_recv() codepath.
*/
nvlist_t *attrs = fnvpair_value_nvlist(pair);
pair = fnvlist_lookup_nvpair(attrs, ZPROP_VALUE);
}
if (nvpair_type(pair) == DATA_TYPE_STRING) {
const char *value = fnvpair_value_string(pair);
dsl_prop_set_sync_impl(ds, name,
source, 1, strlen(value) + 1, value, tx);
} else if (nvpair_type(pair) == DATA_TYPE_UINT64) {
uint64_t intval = fnvpair_value_uint64(pair);
dsl_prop_set_sync_impl(ds, name,
source, sizeof (intval), 1, &intval, tx);
} else if (nvpair_type(pair) == DATA_TYPE_BOOLEAN) {
dsl_prop_set_sync_impl(ds, name,
source, 0, 0, NULL, tx);
} else {
panic("invalid nvpair type");
}
}
}
void
dsl_props_set_sync(void *arg, dmu_tx_t *tx)
{
dsl_props_set_arg_t *dpsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, dpsa->dpsa_dsname, FTAG, &ds));
dsl_props_set_sync_impl(ds, dpsa->dpsa_source, dpsa->dpsa_props, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* All-or-nothing; if any prop can't be set, nothing will be modified.
*/
int
dsl_props_set(const char *dsname, zprop_source_t source, nvlist_t *props)
{
dsl_props_set_arg_t dpsa;
int nblks = 0;
dpsa.dpsa_dsname = dsname;
dpsa.dpsa_source = source;
dpsa.dpsa_props = props;
/*
* If the source includes NONE, then we will only be removing entries
* from the ZAP object. In that case don't check for ENOSPC.
*/
if ((source & ZPROP_SRC_NONE) == 0)
nblks = 2 * fnvlist_num_pairs(props);
return (dsl_sync_task(dsname, dsl_props_set_check, dsl_props_set_sync,
&dpsa, nblks, ZFS_SPACE_CHECK_RESERVED));
}
typedef enum dsl_prop_getflags {
DSL_PROP_GET_INHERITING = 0x1, /* searching parent of target ds */
DSL_PROP_GET_SNAPSHOT = 0x2, /* snapshot dataset */
DSL_PROP_GET_LOCAL = 0x4, /* local properties */
DSL_PROP_GET_RECEIVED = 0x8, /* received properties */
} dsl_prop_getflags_t;
static int
dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
const char *setpoint, dsl_prop_getflags_t flags, nvlist_t *nv)
{
zap_cursor_t zc;
zap_attribute_t za;
int err = 0;
for (zap_cursor_init(&zc, mos, propobj);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
nvlist_t *propval;
zfs_prop_t prop;
char buf[ZAP_MAXNAMELEN];
char *valstr;
const char *suffix;
const char *propname;
const char *source;
suffix = strchr(za.za_name, '$');
if (suffix == NULL) {
/*
* Skip local properties if we only want received
* properties.
*/
if (flags & DSL_PROP_GET_RECEIVED)
continue;
propname = za.za_name;
source = setpoint;
} else if (strcmp(suffix, ZPROP_INHERIT_SUFFIX) == 0) {
/* Skip explicitly inherited entries. */
continue;
} else if (strcmp(suffix, ZPROP_RECVD_SUFFIX) == 0) {
if (flags & DSL_PROP_GET_LOCAL)
continue;
(void) strncpy(buf, za.za_name, (suffix - za.za_name));
buf[suffix - za.za_name] = '\0';
propname = buf;
if (!(flags & DSL_PROP_GET_RECEIVED)) {
/* Skip if locally overridden. */
err = zap_contains(mos, propobj, propname);
if (err == 0)
continue;
if (err != ENOENT)
break;
/* Skip if explicitly inherited. */
valstr = kmem_asprintf("%s%s", propname,
ZPROP_INHERIT_SUFFIX);
err = zap_contains(mos, propobj, valstr);
kmem_strfree(valstr);
if (err == 0)
continue;
if (err != ENOENT)
break;
}
source = ((flags & DSL_PROP_GET_INHERITING) ?
setpoint : ZPROP_SOURCE_VAL_RECVD);
} else {
/*
* For backward compatibility, skip suffixes we don't
* recognize.
*/
continue;
}
prop = zfs_name_to_prop(propname);
/* Skip non-inheritable properties. */
if ((flags & DSL_PROP_GET_INHERITING) && prop != ZPROP_INVAL &&
!zfs_prop_inheritable(prop))
continue;
/* Skip properties not valid for this type. */
if ((flags & DSL_PROP_GET_SNAPSHOT) && prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, ZFS_TYPE_SNAPSHOT, B_FALSE))
continue;
/* Skip properties already defined. */
if (nvlist_exists(nv, propname))
continue;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (za.za_integer_length == 1) {
/*
* String property
*/
char *tmp = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, propobj,
za.za_name, 1, za.za_num_integers, tmp);
if (err != 0) {
kmem_free(tmp, za.za_num_integers);
break;
}
VERIFY(nvlist_add_string(propval, ZPROP_VALUE,
tmp) == 0);
kmem_free(tmp, za.za_num_integers);
} else {
/*
* Integer property
*/
ASSERT(za.za_integer_length == 8);
(void) nvlist_add_uint64(propval, ZPROP_VALUE,
za.za_first_integer);
}
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, source) == 0);
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
zap_cursor_fini(&zc);
if (err == ENOENT)
err = 0;
return (err);
}
/*
* Iterate over all properties for this dataset and return them in an nvlist.
*/
static int
dsl_prop_get_all_ds(dsl_dataset_t *ds, nvlist_t **nvp,
dsl_prop_getflags_t flags)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_pool_t *dp = dd->dd_pool;
objset_t *mos = dp->dp_meta_objset;
int err = 0;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (ds->ds_is_snapshot)
flags |= DSL_PROP_GET_SNAPSHOT;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_phys(ds)->ds_props_obj != 0) {
ASSERT(flags & DSL_PROP_GET_SNAPSHOT);
dsl_dataset_name(ds, setpoint);
err = dsl_prop_get_all_impl(mos,
dsl_dataset_phys(ds)->ds_props_obj, setpoint, flags, *nvp);
if (err)
goto out;
}
for (; dd != NULL; dd = dd->dd_parent) {
if (dd != ds->ds_dir || (flags & DSL_PROP_GET_SNAPSHOT)) {
if (flags & (DSL_PROP_GET_LOCAL |
DSL_PROP_GET_RECEIVED))
break;
flags |= DSL_PROP_GET_INHERITING;
}
dsl_dir_name(dd, setpoint);
err = dsl_prop_get_all_impl(mos,
dsl_dir_phys(dd)->dd_props_zapobj, setpoint, flags, *nvp);
if (err)
break;
}
out:
if (err) {
nvlist_free(*nvp);
*nvp = NULL;
}
return (err);
}
boolean_t
dsl_prop_get_hasrecvd(const char *dsname)
{
uint64_t dummy;
return (0 ==
dsl_prop_get_integer(dsname, ZPROP_HAS_RECVD, &dummy, NULL));
}
static int
dsl_prop_set_hasrecvd_impl(const char *dsname, zprop_source_t source)
{
uint64_t version;
spa_t *spa;
int error = 0;
VERIFY0(spa_open(dsname, &spa, FTAG));
version = spa_version(spa);
spa_close(spa, FTAG);
if (version >= SPA_VERSION_RECVD_PROPS)
error = dsl_prop_set_int(dsname, ZPROP_HAS_RECVD, source, 0);
return (error);
}
/*
* Call after successfully receiving properties to ensure that only the first
* receive on or after SPA_VERSION_RECVD_PROPS blows away local properties.
*/
int
dsl_prop_set_hasrecvd(const char *dsname)
{
int error = 0;
if (!dsl_prop_get_hasrecvd(dsname))
error = dsl_prop_set_hasrecvd_impl(dsname, ZPROP_SRC_LOCAL);
return (error);
}
void
dsl_prop_unset_hasrecvd(const char *dsname)
{
VERIFY0(dsl_prop_set_hasrecvd_impl(dsname, ZPROP_SRC_NONE));
}
int
dsl_prop_get_all(objset_t *os, nvlist_t **nvp)
{
return (dsl_prop_get_all_ds(os->os_dsl_dataset, nvp, 0));
}
int
dsl_prop_get_received(const char *dsname, nvlist_t **nvp)
{
objset_t *os;
int error;
/*
* Received properties are not distinguishable from local properties
* until the dataset has received properties on or after
* SPA_VERSION_RECVD_PROPS.
*/
dsl_prop_getflags_t flags = (dsl_prop_get_hasrecvd(dsname) ?
DSL_PROP_GET_RECEIVED : DSL_PROP_GET_LOCAL);
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = dsl_prop_get_all_ds(os->os_dsl_dataset, nvp, flags);
dmu_objset_rele(os, FTAG);
return (error);
}
void
dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value)
{
nvlist_t *propval;
const char *propname = zfs_prop_to_name(prop);
uint64_t default_value;
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
return;
}
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
/* Indicate the default source if we can. */
if (dodefault(prop, 8, 1, &default_value) == 0 &&
value == default_value) {
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, "") == 0);
}
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
void
dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value)
{
nvlist_t *propval;
const char *propname = zfs_prop_to_name(prop);
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
return;
}
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_prop_register);
EXPORT_SYMBOL(dsl_prop_unregister);
EXPORT_SYMBOL(dsl_prop_unregister_all);
EXPORT_SYMBOL(dsl_prop_get);
EXPORT_SYMBOL(dsl_prop_get_integer);
EXPORT_SYMBOL(dsl_prop_get_all);
EXPORT_SYMBOL(dsl_prop_get_received);
EXPORT_SYMBOL(dsl_prop_get_ds);
EXPORT_SYMBOL(dsl_prop_get_int_ds);
EXPORT_SYMBOL(dsl_prop_get_dd);
EXPORT_SYMBOL(dsl_props_set);
EXPORT_SYMBOL(dsl_prop_set_int);
EXPORT_SYMBOL(dsl_prop_set_string);
EXPORT_SYMBOL(dsl_prop_inherit);
EXPORT_SYMBOL(dsl_prop_predict);
EXPORT_SYMBOL(dsl_prop_nvlist_add_uint64);
EXPORT_SYMBOL(dsl_prop_nvlist_add_string);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/dsl_scan.c b/sys/contrib/openzfs/module/zfs/dsl_scan.c
index 62ee9bb9ab6c..fac658180e1f 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_scan.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_scan.c
@@ -1,4416 +1,4426 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2016 Gary Mills
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/*
* Grand theory statement on scan queue sorting
*
* Scanning is implemented by recursively traversing all indirection levels
* in an object and reading all blocks referenced from said objects. This
* results in us approximately traversing the object from lowest logical
* offset to the highest. For best performance, we would want the logical
* blocks to be physically contiguous. However, this is frequently not the
* case with pools given the allocation patterns of copy-on-write filesystems.
* So instead, we put the I/Os into a reordering queue and issue them in a
* way that will most benefit physical disks (LBA-order).
*
* Queue management:
*
* Ideally, we would want to scan all metadata and queue up all block I/O
* prior to starting to issue it, because that allows us to do an optimal
* sorting job. This can however consume large amounts of memory. Therefore
* we continuously monitor the size of the queues and constrain them to 5%
* (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
* limit, we clear out a few of the largest extents at the head of the queues
* to make room for more scanning. Hopefully, these extents will be fairly
* large and contiguous, allowing us to approach sequential I/O throughput
* even without a fully sorted tree.
*
* Metadata scanning takes place in dsl_scan_visit(), which is called from
* dsl_scan_sync() every spa_sync(). If we have either fully scanned all
* metadata on the pool, or we need to make room in memory because our
* queues are too large, dsl_scan_visit() is postponed and
* scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
* that metadata scanning and queued I/O issuing are mutually exclusive. This
* allows us to provide maximum sequential I/O throughput for the majority of
* I/Os issued, since sequential I/O performance is significantly negatively
* impacted if it is interleaved with random I/O.
*
* Implementation Notes
*
* One side effect of the queued scanning algorithm is that the scanning code
* needs to be notified whenever a block is freed. This is needed to allow
* the scanning code to remove these I/Os from the issuing queue. Additionally,
* we do not attempt to queue gang blocks to be issued sequentially since this
* is very hard to do and would have an extremely limited performance benefit.
* Instead, we simply issue gang I/Os as soon as we find them using the legacy
* algorithm.
*
* Backwards compatibility
*
* This new algorithm is backwards compatible with the legacy on-disk data
* structures (and therefore does not require a new feature flag).
* Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
* will stop scanning metadata (in logical order) and wait for all outstanding
* sorted I/O to complete. Once this is done, we write out a checkpoint
* bookmark, indicating that we have scanned everything logically before it.
* If the pool is imported on a machine without the new sorting algorithm,
* the scan simply resumes from the last checkpoint using the legacy algorithm.
*/
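A minimal userland sketch of the reordering idea described above: collect scan I/Os in discovery order, then issue them in ascending offset order. The real queues use per-vdev range trees and AVL trees rather than qsort; this is only a model of the sorting step.

/* Minimal userland model of LBA-order issuing (not the real queue code). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
        uint64_t offset;        /* on-disk offset used as the sort key */
        uint64_t size;
} io_t;

static int
cmp_offset(const void *a, const void *b)
{
        const io_t *x = a, *y = b;
        return (x->offset > y->offset) - (x->offset < y->offset);
}

int
main(void)
{
        /* Discovery (logical) order is effectively random on a CoW pool. */
        io_t q[] = { {900, 16}, {100, 8}, {500, 32}, {120, 8} };
        size_t n = sizeof (q) / sizeof (q[0]);

        qsort(q, n, sizeof (io_t), cmp_offset); /* issue order: ascending offset */
        for (size_t i = 0; i < n; i++)
                printf("issue offset=%llu size=%llu\n",
                    (unsigned long long)q[i].offset, (unsigned long long)q[i].size);
        return (0);
}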
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
const zbookmark_phys_t *);
static scan_cb_t dsl_scan_scrub_cb;
static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_data_disks(vdev_t *vd);
extern int zfs_vdev_async_write_active_min_dirty_percent;
/*
* By default zfs will check to ensure it is not over the hard memory
* limit before each txg. If finer-grained control of this is needed,
* this value can be set to 1 to enable checking before scanning each
* block.
*/
int zfs_scan_strict_mem_lim = B_FALSE;
/*
* Maximum number of bytes concurrently issued per leaf vdev. We attempt
* to strike a balance here between keeping the vdev queues full of I/Os
* at all times and not overflowing the queues to cause long latency,
* which would cause long txg sync times. No matter what, we will not
* overload the drives with I/O, since that is protected by
* zfs_vdev_scrub_max_active.
*/
unsigned long zfs_scan_vdev_limit = 4 << 20;
int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */
int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */
int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
int zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */
int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
unsigned long zfs_async_block_max_blocks = ULONG_MAX;
/* max number of dedup blocks to free in a single TXG */
unsigned long zfs_max_async_dedup_frees = 100000;
int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
* Unfortunately, it is a bit difficult to determine exactly how long
* this will take since userspace will trigger fs mounts asynchronously
* and the kernel will create zvol minors asynchronously. As a result,
* the value provided here is a bit arbitrary, but represents a
* reasonable estimate of how many txgs it will take to finish fully
* importing a pool.
*/
#define SCAN_IMPORT_WAIT_TXGS 5
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
* Enable/disable the processing of the free_bpobj object.
*/
int zfs_free_bpobj_enabled = 1;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
NULL,
dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
uint64_t sds_dsobj;
uint64_t sds_txg;
avl_node_t sds_node;
} scan_ds_t;
/*
* This controls what conditions are placed on dsl_scan_sync_state():
* SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
* SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
* SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
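* For example, dsl_scan_setup_sync() and dsl_scan_cancel_sync() below
* use SYNC_MANDATORY, while the scrub pause/resume path and the
* dataset destroy/snapshot hooks use SYNC_CACHED.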
* See dsl_scan_sync_state for details.
*/
typedef enum {
SYNC_OPTIONAL,
SYNC_MANDATORY,
SYNC_CACHED
} state_sync_type_t;
/*
* This struct represents the minimum information needed to reconstruct a
* zio for sequential scanning. This is useful because many of these will
* accumulate in the sequential IO queues before being issued, so saving
* memory matters here.
*/
typedef struct scan_io {
/* fields from blkptr_t */
uint64_t sio_blk_prop;
uint64_t sio_phys_birth;
uint64_t sio_birth;
zio_cksum_t sio_cksum;
uint32_t sio_nr_dvas;
/* fields from zio_t */
uint32_t sio_flags;
zbookmark_phys_t sio_zb;
/* members for queue sorting */
union {
avl_node_t sio_addr_node; /* link into issuing queue */
list_node_t sio_list_node; /* link for issuing to disk */
} sio_nodes;
/*
* There may be up to SPA_DVAS_PER_BP DVAs here from the bp,
* depending on how many were in the original bp. Only the
* first DVA is really used for sorting and issuing purposes.
* The other DVAs (if provided) simply exist so that the zio
* layer can find additional copies to repair from in the
* event of an error. This array must go at the end of the
* struct so that it can hold a variable number of elements.
*/
dva_t sio_dva[0];
} scan_io_t;
#define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x)
#define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x)
#define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0])
#define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0])
#define SIO_GET_END_OFFSET(sio) \
(SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio))
#define SIO_GET_MUSED(sio) \
(sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t)))
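/*
* For example, a queued block with two DVAs is allocated from the
* "sio_cache_1" kmem cache created in scan_init() below, and its
* SIO_GET_MUSED() footprint of sizeof (scan_io_t) + 2 * sizeof (dva_t)
* bytes is what counts toward the q_sio_memused total checked by
* dsl_scan_should_clear().
*/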
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
/* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr;
zfs_btree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
uint64_t q_sio_memused;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
uint64_t q_inflight_bytes;
kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
/* per txg statistics */
uint64_t q_total_seg_size_this_txg;
uint64_t q_segs_this_txg;
uint64_t q_total_zio_size_this_txg;
uint64_t q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
zfs_refcount_t spc_refcnt; /* refcount for memory management */
dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
boolean_t spc_root; /* is this prefetch for an objset? */
uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
uint16_t spc_datablkszsec; /* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;
/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
blkptr_t spic_bp; /* bp to prefetch */
zbookmark_phys_t spic_zb; /* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);
static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP];
/* sio->sio_nr_dvas must be set so we know which cache to free from */
static void
sio_free(scan_io_t *sio)
{
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio);
}
/* It is up to the caller to set sio->sio_nr_dvas for freeing */
static scan_io_t *
sio_alloc(unsigned short nr_dvas)
{
ASSERT3U(nr_dvas, >, 0);
ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP);
return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP));
}
void
scan_init(void)
{
/*
* This is used in ext_size_compare() to weight segments
* based on how sparse they are. This cannot be changed
* mid-scan and the tree comparison functions don't currently
* have a mechanism for passing additional context to the
* compare functions. Thus we store this value globally and
* we only allow it to be set at module initialization time
*/
fill_weight = zfs_scan_fill_weight;
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
char name[36];
(void) snprintf(name, sizeof (name), "sio_cache_%d", i);
sio_cache[i] = kmem_cache_create(name,
(sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))),
0, NULL, NULL, NULL, NULL, NULL, 0);
}
}
void
scan_fini(void)
{
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
kmem_cache_destroy(sio_cache[i]);
}
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
return (scn->scn_phys.scn_state == DSS_SCANNING);
}
boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
return (dsl_scan_is_running(dp->dp_scan) &&
dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp)
{
bzero(bp, sizeof (*bp));
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
bp->blk_fill = 1; /* we always only work with data pointers */
bp->blk_cksum = sio->sio_cksum;
ASSERT3U(sio->sio_nr_dvas, >, 0);
ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP);
bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t));
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
sio->sio_blk_prop = bp->blk_prop;
sio->sio_phys_birth = bp->blk_phys_birth;
sio->sio_birth = bp->blk_birth;
sio->sio_cksum = bp->blk_cksum;
sio->sio_nr_dvas = BP_GET_NDVAS(bp);
/*
* Copy the DVAs to the sio. We need all copies of the block so
* that the self healing code can use the alternate copies if the
* first is corrupted. We want the DVA at index dva_i to be first
* in the sio since this is the primary one that we want to issue.
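*
* For example, for a bp with three DVAs and dva_i == 1, the loop
* below stores the DVAs in the sio in the order {1, 2, 0}.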
*/
for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) {
sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas];
}
}
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
int err;
dsl_scan_t *scn;
spa_t *spa = dp->dp_spa;
uint64_t f;
scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
scn->scn_dp = dp;
/*
* It's possible that we're resuming a scan after a reboot so
* make sure that the scan_async_destroying flag is initialized
* appropriately.
*/
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB). Limits for the issuing
* phase are done per top-level vdev and are handled separately.
*/
scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
sizeof (scan_prefetch_issue_ctx_t),
offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_func", sizeof (uint64_t), 1, &f);
if (err == 0) {
/*
* There was an old-style scrub in progress. Restart a
* new-style scrub from the beginning.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("old-style scrub was in progress; "
"restarting new-style scrub in txg %llu",
(longlong_t)scn->scn_restart_txg);
/*
* Load the queue obj from the old location so that it
* can be freed by dsl_scan_done().
*/
(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
/*
* Detect if the pool contains the signature of #2094. If it
* does properly update the scn->scn_phys structure and notify
* the administrator by setting an errata for the pool.
*/
if (err == EOVERFLOW) {
uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
(23 * sizeof (uint64_t)));
err = zap_lookup(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
if (err == 0) {
uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
if (overflow & ~DSL_SCAN_FLAGS_MASK ||
scn->scn_async_destroying) {
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
return (EOVERFLOW);
}
bcopy(zaptmp, &scn->scn_phys,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
/* Required scrub already in progress. */
if (scn->scn_phys.scn_state == DSS_FINISHED ||
scn->scn_phys.scn_state == DSS_CANCELED)
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_SCRUB;
}
}
if (err == ENOENT)
return (0);
else if (err)
return (err);
/*
* We might be restarting after a reboot, so jump the issued
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
/*
* A new-type scrub was in progress on an old
* pool, and the pool was accessed by old
* software. Restart from the beginning, since
* the old software may have changed the pool in
* the meantime.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("new-style scrub was modified "
"by old software; restarting in txg %llu",
(longlong_t)scn->scn_restart_txg);
} else if (dsl_scan_resilvering(dp)) {
/*
* If a resilver is in progress and there are already
* errors, restart it instead of finishing this scan and
* then restarting it. If there haven't been any errors
* then remember that the incore DTL is valid.
*/
if (scn->scn_phys.scn_errors > 0) {
scn->scn_restart_txg = txg;
zfs_dbgmsg("resilver can't excise DTL_MISSING "
"when finished; restarting in txg %llu",
(u_longlong_t)scn->scn_restart_txg);
} else {
/* it's safe to excise DTL when finished */
spa->spa_scrub_started = B_TRUE;
}
}
}
bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
za.za_first_integer);
}
zap_cursor_fini(&zc);
}
spa_scan_stat_init(spa);
return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
if (dp->dp_scan != NULL) {
dsl_scan_t *scn = dp->dp_scan;
if (scn->scn_taskq != NULL)
taskq_destroy(scn->scn_taskq);
scan_ds_queue_clear(scn);
avl_destroy(&scn->scn_queue);
scan_ds_prefetch_queue_clear(scn);
avl_destroy(&scn->scn_prefetch_queue);
kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
dp->dp_scan = NULL;
}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
return (scn->scn_restart_txg != 0 &&
scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_resilver_scheduled(dsl_pool_t *dp)
{
return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) ||
(spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER));
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
return (scn_phys->scn_state == DSS_SCANNING &&
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
return (dsl_scan_scrubbing(scn->scn_dp) &&
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we can be running in the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
* (scn_bytes_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
* mandatory, we do a hard ASSERT to make sure that the queues are empty. The
* third possible state is a "cached" sync. This is done in response to:
* 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
* destroyed, so we wouldn't be able to restart scanning from it.
* 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
* superseded by a newer snapshot.
* 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
* swapped with its clone.
* In all cases, a cached sync simply rewrites the last record we've written,
* just slightly modified. For the modifications that are performed to the
* last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
* dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
*/
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
int i;
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
if (scn->scn_bytes_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
if (q == NULL)
continue;
mutex_enter(&vd->vdev_scan_io_queue_lock);
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==,
NULL);
ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
if (scn->scn_phys.scn_queue_obj != 0)
scan_ds_queue_sync(scn, tx);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
bcopy(&scn->scn_phys, &scn->scn_phys_cached,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
zfs_dbgmsg("finish scan checkpoint");
scn->scn_checkpointing = B_FALSE;
scn->scn_last_checkpoint = ddi_get_lbolt();
} else if (sync_type == SYNC_CACHED) {
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys_cached, tx));
}
}
-/* ARGSUSED */
int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd))
return (SET_ERROR(EBUSY));
return (0);
}
void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
bzero(&scn->scn_phys, sizeof (scn->scn_phys));
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_phys.scn_max_txg = tx->tx_txg;
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
scn->scn_issued_before_pass = 0;
scn->scn_restart_txg = 0;
scn->scn_done_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* rewrite all disk labels */
vdev_config_dirty(spa->spa_root_vdev);
if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_START);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
}
spa->spa_scrub_started = B_TRUE;
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
/*
* When starting a resilver, clear any existing rebuild state.
* This is required to prevent stale rebuild status from
* being reported when a rebuild is run, then a resilver, and
* finally a scrub, in which case only the scrub status
* should be reported by 'zpool status'.
*/
if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) {
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
vdev_rebuild_clear_sync(
(void *)(uintptr_t)vd->vdev_id, tx);
}
}
}
/* back to the generic stuff */
if (dp->dp_blkstats == NULL) {
dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
mutex_init(&dp->dp_blkstats->zab_lock, NULL,
MUTEX_DEFAULT, NULL);
}
bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
*funcp, (u_longlong_t)scn->scn_phys.scn_min_txg,
(u_longlong_t)scn->scn_phys.scn_max_txg);
}
/*
* Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
* Can also be called to resume a paused scrub.
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
spa_vdev_state_enter(spa, SCL_NONE);
spa->spa_scrub_reopen = B_TRUE;
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
if (func == POOL_SCAN_RESILVER) {
dsl_scan_restart_resilver(spa->spa_dsl_pool, 0);
return (0);
}
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (SET_ERROR(ECANCELED));
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
-/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
static const char *old_names[] = {
"scrub_bookmark",
"scrub_ddt_bookmark",
"scrub_ddt_class_max",
"scrub_queue",
"scrub_min_txg",
"scrub_max_txg",
"scrub_func",
"scrub_errors",
NULL
};
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int i;
/* Remove any remnants of an old-style scrub. */
for (i = 0; old_names[i]; i++) {
(void) zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
}
if (scn->scn_phys.scn_queue_obj != 0) {
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = 0;
}
scan_ds_queue_clear(scn);
scan_ds_prefetch_queue_clear(scn);
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
/*
* If we were "restarted" from a stopped state, don't bother
* with anything else.
*/
if (!dsl_scan_is_running(scn)) {
ASSERT(!scn->scn_is_sorted);
return;
}
if (scn->scn_is_sorted) {
scan_io_queues_destroy(scn);
scn->scn_is_sorted = B_FALSE;
if (scn->scn_taskq != NULL) {
taskq_destroy(scn->scn_taskq);
scn->scn_taskq = NULL;
}
}
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_active = B_FALSE;
/*
* If the scrub/resilver completed, update all DTLs to
* reflect this. Whether it succeeded or not, vacate
* all temporary scrub DTLs.
*
* As the scrub does not currently support traversing
* data that have been freed but are part of a checkpoint,
* we don't mark the scrub as done in the DTLs as faults
* may still exist in those vdevs.
*/
if (complete &&
!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE);
if (scn->scn_phys.scn_min_txg) {
nvlist_t *aux = fnvlist_alloc();
fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE,
"healing");
spa_event_notify(spa, NULL, aux,
ESC_ZFS_RESILVER_FINISH);
nvlist_free(aux);
} else {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_SCRUB_FINISH);
}
} else {
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
0, B_TRUE, B_FALSE);
}
spa_errlog_rotate(spa);
/*
* Don't clear flag until after vdev_dtl_reassess to ensure that
* DTL_MISSING will get updated when possible.
*/
spa->spa_scrub_started = B_FALSE;
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
/*
* Clear any resilver_deferred flags in the config.
* If there are drives that need resilvering, kick
* off an asynchronous request to start resilver.
* vdev_clear_resilver_deferred() may update the config
* before the resilver can restart. In the event of
* a crash during this period, the spa loading code
* will find the drives that need to be resilvered
* and start the resilver then.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) &&
vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) {
spa_history_log_internal(spa,
"starting deferred resilver", tx, "errors=%llu",
(u_longlong_t)spa_get_errlog_size(spa));
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/* Clear recent error events (i.e. duplicate events tracking) */
if (complete)
zfs_ereport_clear(spa, NULL);
}
scn->scn_phys.scn_end_time = gethrestime_sec();
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_scan_is_running(scn));
}
-/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (SET_ERROR(ENOENT));
return (0);
}
-/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
if (!dsl_scan_scrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused scrub */
if (dsl_scan_is_paused_scrub(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
spa_notify_waiters(spa);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the scrub rate
* shown in the output of 'zpool status'
*/
spa->spa_scan_pass_scrub_spent_paused +=
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
}
/*
* Set scrub pause/resume state if it makes sense to do so
*/
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
/* start a new scan, or restart an existing one. */
void
dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
{
if (txg == 0) {
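/*
* A txg of 0, as passed by dsl_scan() above for POOL_SCAN_RESILVER,
* means "use the currently open txg": assign an empty tx solely to
* discover that txg and schedule the restart there.
*/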
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;
dmu_tx_commit(tx);
} else {
dp->dp_scan->scn_restart_txg = txg;
}
zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
zio_free(dp->dp_spa, txg, bp);
}
void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
ASSERT(dsl_pool_sync_context(dp));
zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
static int
scan_ds_queue_compare(const void *a, const void *b)
{
const scan_ds_t *sds_a = a, *sds_b = b;
if (sds_a->sds_dsobj < sds_b->sds_dsobj)
return (-1);
if (sds_a->sds_dsobj == sds_b->sds_dsobj)
return (0);
return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
void *cookie = NULL;
scan_ds_t *sds;
while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
kmem_free(sds, sizeof (*sds));
}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
if (sds != NULL && txg != NULL)
*txg = sds->sds_txg;
return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
scan_ds_t *sds;
avl_index_t where;
sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
sds->sds_dsobj = dsobj;
sds->sds_txg = txg;
VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
VERIFY(sds != NULL);
avl_remove(&scn->scn_queue, sds);
kmem_free(sds, sizeof (*sds));
}
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
ASSERT0(scn->scn_bytes_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
DMU_OT_NONE, 0, tx);
for (scan_ds_t *sds = avl_first(&scn->scn_queue);
sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
VERIFY0(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
sds->sds_txg, tx));
}
}
/*
* Computes the memory limit state that we're currently in. A sorted scan
* needs quite a bit of memory to hold the sorting queue, so we need to
* reasonably constrain the size so it doesn't impact overall system
* performance. We compute two limits:
* 1) Hard memory limit: if the amount of memory used by the sorting
* queues on a pool gets above this value, we stop the metadata
* scanning portion and start issuing the queued up and sorted
* I/Os to reduce memory usage.
* This limit is calculated as a fraction of physmem (by default 5%).
* We constrain the lower bound of the hard limit to an absolute
* minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
* the upper bound to 5% of the total pool size - no chance we'll
* ever need that much memory, but just to keep the value in check.
* 2) Soft memory limit: once we hit the hard memory limit, we start
* issuing I/O to reduce queue memory usage, but we don't want to
* completely empty out the queues, since we might be able to find I/Os
* that will fill in the gaps of our non-sequential IOs at some point
* in the future. So we stop the issuing of I/Os once the amount of
* memory used drops below the soft limit (at which point we stop issuing
* I/O and start scanning metadata again).
*
* This limit is calculated by subtracting a fraction of the hard
* limit from the hard limit. By default this fraction is 5%, so
* the soft limit is 95% of the hard limit. We cap the size of the
* difference between the hard and soft limits at an absolute
* maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
* sufficient to not cause too frequent switching between the
* metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
* worth of queues is about 1.2 GiB of on-pool data, so scanning
* that should take at least a decent fraction of a second).
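*
* For example, with the default tunables on a machine with 16 GiB of
* physical memory and 1 TiB allocated in the pool, the hard limit
* works out to MAX(16 GiB / 20, 16 MiB) = ~819 MiB (well under the
* ~51 GiB cap of 5% of allocated space), and the soft limit to
* ~819 MiB - MIN(~819 MiB / 20, 128 MiB) = ~778 MiB.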
*/
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
uint64_t alloc, mlim_hard, mlim_soft, mused;
alloc = metaslab_class_get_alloc(spa_normal_class(spa));
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
zfs_scan_mem_lim_min);
mlim_hard = MIN(mlim_hard, alloc / 20);
mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
zfs_scan_mem_lim_soft_max);
mused = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
dsl_scan_io_queue_t *queue;
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
/* # extents in exts_by_size = # in exts_by_addr */
mused += zfs_btree_numnodes(&queue->q_exts_by_size) *
sizeof (range_seg_gap_t) + queue->q_sio_memused;
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
ASSERT0(scn->scn_bytes_pending);
/*
* If we are above our hard limit, we need to clear out memory.
* If we are below our soft limit, we need to accumulate sequential IOs.
* Otherwise, we should keep doing whatever we are currently doing.
*/
if (mused >= mlim_hard)
return (B_TRUE);
else if (mused < mlim_soft)
return (B_FALSE);
else
return (scn->scn_clearing);
}
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
if (scn->scn_suspending)
return (B_TRUE); /* we're already suspending */
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 and objset blocks. */
if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL))
return (B_FALSE);
/*
* We suspend if:
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
* or
* - the scan queue has reached its memory use limit
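*
* For example, with the default tunables a scrub that has been
* scanning for 1.2 seconds in a txg whose dirty data has reached 35%
* of zfs_dirty_data_max meets the first condition (1200 ms >
* zfs_scrub_min_time_ms and 35% >= the 30% dirty threshold) and
* suspends.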
*/
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
(dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
(zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
if (zb && zb->zb_level == ZB_ROOT_LEVEL) {
dprintf("suspending at first available bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
zb->zb_objset, 0, 0, 0);
} else if (zb != NULL) {
dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
scn->scn_phys.scn_bookmark = *zb;
} else {
#ifdef ZFS_DEBUG
dsl_scan_phys_t *scnp = &scn->scn_phys;
dprintf("suspending at at DDT bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
}
scn->scn_suspending = B_TRUE;
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
} zil_scan_arg_t;
-/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
uint64_t claim_txg)
{
+ (void) zilog;
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* One block ("stubby") can be allocated a long time ago; we
* want to visit that one because it has been allocated
* (on-disk) even if it hasn't been claimed (even though for
* scrub there's nothing to do to it).
*/
if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa))
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
-/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
uint64_t claim_txg)
{
+ (void) zilog;
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
const lr_write_t *lr = (const lr_write_t *)lrc;
const blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
ASSERT(!BP_IS_REDACTED(bp));
if (BP_IS_HOLE(bp) ||
bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* birth can be < claim_txg if this record's txg is
* already txg sync'ed (but this log block contains
* other records that are not synced)
*/
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
ASSERT(spa_writeable(dp->dp_spa));
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0)
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg, B_FALSE);
zil_free(zilog);
}
/*
* We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
* here is to sort the AVL tree by the order each block will be needed.
*/
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
return (zbookmark_compare(spc_a->spc_datablkszsec,
spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
{
scan_prefetch_ctx_t *spc;
spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
zfs_refcount_create(&spc->spc_refcnt);
zfs_refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {
spc->spc_datablkszsec = dnp->dn_datablkszsec;
spc->spc_indblkshift = dnp->dn_indblkshift;
spc->spc_root = B_FALSE;
} else {
spc->spc_datablkszsec = 0;
spc->spc_indblkshift = 0;
spc->spc_root = B_TRUE;
}
return (spc);
}
static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
{
zfs_refcount_add(&spc->spc_refcnt, tag);
}
static void
scan_ds_prefetch_queue_clear(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
void *cookie = NULL;
scan_prefetch_issue_ctx_t *spic = NULL;
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue,
&cookie)) != NULL) {
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
const zbookmark_phys_t *zb)
{
zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
dnode_phys_t tmp_dnp;
dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
if (zb->zb_objset != last_zb->zb_objset)
return (B_TRUE);
if ((int64_t)zb->zb_object < 0)
return (B_FALSE);
tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
if (zbookmark_subtree_completed(dnp, zb, last_zb))
return (B_TRUE);
return (B_FALSE);
}
static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
avl_index_t idx;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp))
return;
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET))
return;
if (dsl_scan_check_prefetch_resume(spc, zb))
return;
scan_prefetch_ctx_add_ref(spc, scn);
spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
spic->spic_spc = spc;
spic->spic_bp = *bp;
spic->spic_zb = *zb;
/*
* Add the IO to the queue of blocks to prefetch. This allows us to
* prioritize blocks that we will need first for the main traversal
* thread.
*/
mutex_enter(&spa->spa_scrub_lock);
if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
/* this block is already queued for prefetch */
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
scan_prefetch_ctx_rele(spc, scn);
mutex_exit(&spa->spa_scrub_lock);
return;
}
avl_insert(&scn->scn_prefetch_queue, spic, idx);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int i;
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
return;
SET_BOOKMARK(&zb, objset, object, 0, 0);
spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
for (i = 0; i < dnp->dn_nblkptr; i++) {
zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
zb.zb_blkid = i;
dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zb.zb_level = 0;
zb.zb_blkid = DMU_SPILL_BLKID;
dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
}
scan_prefetch_ctx_rele(spc, FTAG);
}
static void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
+ (void) zio;
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
/* broadcast that the IO has completed for rate limiting purposes */
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
/* if there was an error or we are done prefetching, just cleanup */
if (buf == NULL || scn->scn_prefetch_stop)
goto out;
if (BP_GET_LEVEL(bp) > 0) {
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t czb;
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1, zb->zb_blkid * epb + i);
dsl_scan_prefetch(spc, cbp, &czb);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_prefetch_dnode(scn, cdnp,
zb->zb_objset, zb->zb_blkid * epb + i);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
objset_phys_t *osp = buf->b_data;
dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
dsl_scan_prefetch_dnode(scn,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
out:
if (buf != NULL)
arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
-/* ARGSUSED */
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_t *scn = arg;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
/* loop until we are told to stop */
while (!scn->scn_prefetch_stop) {
arc_flags_t flags = ARC_FLAG_NOWAIT |
ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
mutex_enter(&spa->spa_scrub_lock);
/*
* Wait until we have an IO to issue and are not above our
* maximum in flight limit.
*/
while (!scn->scn_prefetch_stop &&
(avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
}
/* recheck if we should stop since we waited for the cv */
if (scn->scn_prefetch_stop) {
mutex_exit(&spa->spa_scrub_lock);
break;
}
/* remove the prefetch IO from the tree */
spic = avl_first(&scn->scn_prefetch_queue);
spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
avl_remove(&scn->scn_prefetch_queue, spic);
mutex_exit(&spa->spa_scrub_lock);
if (BP_IS_PROTECTED(&spic->spic_bp)) {
ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
zio_flags |= ZIO_FLAG_RAW;
}
/* issue the prefetch asynchronously */
(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
&spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT(scn->scn_prefetch_stop);
/* free any prefetches we didn't get to complete */
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
avl_remove(&scn->scn_prefetch_queue, spic);
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
mutex_exit(&spa->spa_scrub_lock);
}
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
/*
* We never skip over user/group accounting objects (obj<0)
*/
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
(int64_t)zb->zb_object >= 0) {
/*
* If we already visited this bp & everything below (in
* a prior txg sync), don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb,
&scn->scn_phys.scn_bookmark))
return (B_TRUE);
/*
* If we found the block we're trying to resume from, or
* we went past it to a different object, zero it out to
* indicate that it's OK to start checking for suspending
* again.
*/
if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
}
}
return (B_FALSE);
}
static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
/*
* Return nonzero on i/o error.
*/
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
ASSERT(!BP_IS_REDACTED(bp));
+ /*
+ * There is an unlikely case of encountering dnodes with contradicting
+ * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag in files created or
+ * modified before commit 4254acb was merged. As it is not possible
+ * to know which of the two is correct, report an error.
+ */
+ if (dnp != NULL &&
+ dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
+ scn->scn_phys.scn_errors++;
+ spa_log_error(dp->dp_spa, zb);
+ return (SET_ERROR(EINVAL));
+ }
+
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
dsl_scan_visitbp(cbp, &czb, dnp,
ds, scn, ostype, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
arc_buf_t *buf;
if (BP_IS_PROTECTED(bp)) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
zio_flags |= ZIO_FLAG_RAW;
}
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_visitdnode(scn, ds, ostype,
cdnp, zb->zb_blkid * epb + i, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
osp = buf->b_data;
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
* We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_userused_dnode,
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
}
return (0);
}
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
dmu_objset_type_t ostype, dnode_phys_t *dnp,
uint64_t object, dmu_tx_t *tx)
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
dnp->dn_nlevels - 1, j);
dsl_scan_visitbp(&dnp->dn_blkptr[j],
&czb, dnp, ds, scn, ostype, tx);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
0, DMU_SPILL_BLKID);
dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
&czb, dnp, ds, scn, ostype, tx);
}
}
/*
* The arguments are in this order because mdb can only print the
* first 5; we want them to be useful.
*/
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
blkptr_t *bp_toread = NULL;
if (dsl_scan_check_suspend(scn, zb))
return;
if (dsl_scan_check_resume(scn, dnp, zb))
return;
scn->scn_visited_this_txg++;
/*
* This debugging is commented out to conserve stack space. This
* function is called recursively and the debugging adds several
* bytes to the stack for each call. It can be commented back in
* if required to debug an issue in dsl_scan_visitbp().
*
* dprintf_bp(bp,
* "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
* ds, ds ? ds->ds_object : 0,
* zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
* bp);
*/
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
}
if (BP_IS_REDACTED(bp)) {
ASSERT(dsl_dataset_feature_is_active(ds,
SPA_FEATURE_REDACTED_DATASETS));
return;
}
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
*bp_toread = *bp;
if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
goto out;
/*
* If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
goto out;
}
/*
* If this block is from the future (after cur_max_txg), then we
* are doing this on behalf of a deleted snapshot, and we will
* revisit the future block on the next pass of this dataset.
* Don't scan it now unless we need to because something
* under it was modified.
*/
if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
goto out;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
out:
kmem_free(bp_toread, sizeof (blkptr_t));
}
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
dmu_tx_t *tx)
{
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
SET_BOOKMARK(&scn->scn_prefetch_bookmark,
zb.zb_objset, 0, 0, 0);
} else {
scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
}
scn->scn_objsets_visited_this_txg++;
spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
dsl_scan_prefetch(spc, bp, &zb);
scan_prefetch_ctx_rele(spc, FTAG);
dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
dprintf_ds(ds, "finished scan%s", "");
}
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
if (ds->ds_is_snapshot) {
/*
* Note:
* - scn_cur_{min,max}_txg stays the same.
* - Setting the flag is not really necessary if
* scn_cur_max_txg == scn_max_txg, because there
* is nothing after this snapshot that we care
* about. However, we set it anyway and then
* ignore it when we retraverse it in
* dsl_scan_visitds().
*/
scn_phys->scn_bookmark.zb_objset =
dsl_dataset_phys(ds)->ds_next_snap_obj;
zfs_dbgmsg("destroying ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
} else {
SET_BOOKMARK(&scn_phys->scn_bookmark,
ZB_DESTROYED_OBJSET, 0, 0, 0);
zfs_dbgmsg("destroying ds %llu; currently traversing; "
"reset bookmark to -1,0,0,0",
(u_longlong_t)ds->ds_object);
}
}
}
/*
* Invoked when a dataset is destroyed. We need to make sure that:
*
* 1) If it is the dataset that was currently being scanned, we write
* a new dsl_scan_phys_t and mark the objset reference in it
* as destroyed.
* 2) Remove it from the work queue, if it was present.
*
* If the dataset was actually a snapshot, instead of marking the dataset
* as destroyed, we instead substitute the next snapshot in line.
*/
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_destroyed_scn_phys(ds, &scn->scn_phys);
ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
if (ds->ds_is_snapshot)
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
if (ds->ds_is_snapshot) {
/*
* We keep the same mintxg; it could be >
* ds_creation_txg if the previous snapshot was
* deleted too.
*/
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
mintxg, tx) == 0);
zfs_dbgmsg("destroying ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
} else {
zfs_dbgmsg("destroying ds %llu; in queue; removing",
(u_longlong_t)ds->ds_object);
}
}
/*
* dsl_scan_sync() should be called after this, and should sync
* out our changed state, but just to be safe, do it here.
*/
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds->ds_object) {
scn_bookmark->zb_objset =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
}
/*
* Called when a dataset is snapshotted. If we were currently traversing
* this snapshot, we reset our bookmark to point at the newly created
* snapshot. We also modify our work queue to remove the old snapshot and
* replace with the new one.
*/
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
zfs_dbgmsg("snapshotting ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds1->ds_object) {
scn_bookmark->zb_objset = ds2->ds_object;
zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds1->ds_object,
(u_longlong_t)ds2->ds_object);
} else if (scn_bookmark->zb_objset == ds2->ds_object) {
scn_bookmark->zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds2->ds_object,
(u_longlong_t)ds1->ds_object);
}
}
/*
* Called when an origin dataset and its clone are swapped. If we were
* currently traversing the dataset, we need to switch to traversing the
* newly promoted clone.
*/
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds1->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg1, mintxg2;
boolean_t ds1_queued, ds2_queued;
if (!dsl_scan_is_running(scn))
return;
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
/*
* Handle the in-memory scan queue.
*/
ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1);
ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2);
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* The swapping code below would not handle this case correctly,
* since we can't insert ds2 if it is already there. That's
* because scan_ds_queue_insert() prohibits a duplicate insert
* and panics.
*/
} else if (ds1_queued) {
scan_ds_queue_remove(scn, ds1->ds_object);
scan_ds_queue_insert(scn, ds2->ds_object, mintxg1);
} else if (ds2_queued) {
scan_ds_queue_remove(scn, ds2->ds_object);
scan_ds_queue_insert(scn, ds1->ds_object, mintxg2);
}
/*
* Handle the on-disk scan queue.
* The on-disk state is an out-of-date version of the in-memory state,
* so the in-memory and on-disk values for ds1_queued and ds2_queued may
* be different. Therefore we need to apply the swap logic to the
* on-disk state independently of the in-memory state.
*/
ds1_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0;
ds2_queued = zap_lookup_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0;
/* Sanity checking. */
if (ds1_queued) {
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds2_queued) {
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
}
if (ds1_queued && ds2_queued) {
/*
* If both are queued, we don't need to do anything.
* Alternatively, we could check for EEXIST from
* zap_add_int_key() and back out to the original state, but
* that would be more work than checking for this case upfront.
*/
} else if (ds1_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx));
zfs_dbgmsg("clone_swap ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds1->ds_object,
(u_longlong_t)ds2->ds_object);
} else if (ds2_queued) {
VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx));
zfs_dbgmsg("clone_swap ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds2->ds_object,
(u_longlong_t)ds1->ds_object);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
-/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
uint64_t originobj = *(uint64_t *)arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
return (0);
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (err)
return (err);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (scn->scn_phys.scn_cur_min_txg >=
scn->scn_phys.scn_max_txg) {
/*
* This can happen if this snapshot was created after the
* scan started, and we already completed a previous snapshot
* that was created after the scan started. This snapshot
* only references blocks with:
*
* birth < our ds_creation_txg
* cur_min_txg is no less than ds_creation_txg.
* We have already visited these blocks.
* or
* birth > scn_max_txg
* The scan requested not to visit these blocks.
*
* Subsequent snapshots (and clones) can reference our
* blocks, or blocks with even higher birth times.
* Therefore we do not need to visit them either,
* so we do not add them to the work queue.
*
* Note that checking for cur_min_txg >= cur_max_txg
* is not sufficient, because in that case we may need to
* visit subsequent snapshots. This happens when min_txg > 0,
* which raises cur_min_txg. In this case we will visit
* this dataset but skip all of its blocks, because the
* rootbp's birth time is < cur_min_txg. Then we will
* add the next snapshots/clones to the work queue.
*/
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
"cur_min_txg (%llu) >= max_txg (%llu)",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_max_txg);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
goto out;
}
/*
* Only the ZIL in the head (non-snapshot) is valid. Even though
* snapshots can have ZIL block pointers (which may be the same
* BP as in the head), they must be ignored. In addition, $ORIGIN
* doesn't have an objset (i.e. its ds_bp is a hole) so we don't
* need to look for a ZIL in it either. So we traverse the ZIL here,
* rather than in scan_recurse(), because the regular snapshot
* block-sharing rules don't apply to it.
*/
if (!dsl_dataset_is_snapshot(ds) &&
(dp->dp_origin_snap == NULL ||
ds->ds_dir != dp->dp_origin_snap->ds_dir)) {
objset_t *os;
if (dmu_objset_from_ds(ds, &os) != 0) {
goto out;
}
dsl_scan_zil(dp, &os->os_zil_header);
}
/*
* Iterate over the bps in this ds.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_cur_max_txg,
(int)scn->scn_suspending);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
if (scn->scn_suspending)
goto out;
/*
* We've finished this pass over this dataset.
*/
/*
* If we did not completely visit this dataset, do another pass.
*/
if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
zfs_dbgmsg("incomplete pass; visiting again");
scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
scan_ds_queue_insert(scn, ds->ds_object,
scn->scn_phys.scn_cur_max_txg);
goto out;
}
/*
* Add descendant datasets to work queue.
*/
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj,
dsl_dataset_phys(ds)->ds_creation_txg);
}
if (dsl_dataset_phys(ds)->ds_num_children > 1) {
boolean_t usenext = B_FALSE;
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count;
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
int err = zap_count(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
if (err == 0 &&
count == dsl_dataset_phys(ds)->ds_num_children - 1)
usenext = B_TRUE;
}
if (usenext) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
dsl_dataset_phys(ds)->ds_creation_txg);
}
zap_cursor_fini(&zc);
} else {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_clones_cb, &ds->ds_object,
DS_FIND_CHILDREN));
}
}
out:
dsl_dataset_rele(ds, FTAG);
}
-/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
+ (void) arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(prev, FTAG);
return (0);
}
dsl_dataset_rele(ds, FTAG);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
-/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx)
{
+ (void) tx;
const ddt_key_t *ddk = &dde->dde_key;
ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
- int p;
if (!dsl_scan_is_running(scn))
return;
/*
* This function is special because it is the only thing
* that can add scan_io_t's to the vdev scan queues from
* outside dsl_scan_sync(). For the most part this is ok
* as long as it is called from within syncing context.
* However, dsl_scan_sync() expects that no new sio's will
* be added between when all the work for a scan is done
* and the next txg when the scan is actually marked as
* completed. This check ensures we do not issue new sio's
* during this period.
*/
if (scn->scn_done_txg != 0)
return;
- for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+ for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue;
ddt_bp_create(checksum, ddk, ddp, &bp);
scn->scn_visited_this_txg++;
scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
}
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (enum ddt_class)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can, so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_entry_t dde;
int error;
uint64_t n = 0;
bzero(&dde, sizeof (ddt_entry_t));
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
break;
dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
(longlong_t)ddb->ddb_class,
(longlong_t)ddb->ddb_type,
(longlong_t)ddb->ddb_checksum,
(longlong_t)ddb->ddb_cursor);
/* There should be no pending changes to the dedup table */
ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
ASSERT(avl_first(&ddt->ddt_tree) == NULL);
dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
n++;
if (dsl_scan_check_suspend(scn, NULL))
break;
}
zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
"suspending=%u", (longlong_t)n,
(int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
ASSERT(error == 0 || error == ENOENT);
ASSERT(error != ENOENT ||
ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
if (ds->ds_is_snapshot)
return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
return (smt);
}
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
scan_ds_t *sds;
dsl_pool_t *dp = scn->scn_dp;
if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
scn->scn_phys.scn_ddt_class_max) {
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_ddt(scn, tx);
if (scn->scn_suspending)
return;
}
if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
/* First do the MOS & ORIGIN */
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
if (scn->scn_suspending)
return;
if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_cb, NULL, DS_FIND_CHILDREN));
} else {
dsl_scan_visitds(scn,
dp->dp_origin_snap->ds_object, tx);
}
ASSERT(!scn->scn_suspending);
} else if (scn->scn_phys.scn_bookmark.zb_objset !=
ZB_DESTROYED_OBJSET) {
uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
/*
* If we were suspended, continue from here. Note if the
* ds we were suspended on was deleted, the zb_objset may
* be -1, so we will skip this and find a new objset
* below.
*/
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/*
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
* persistent zap-object-as-queue happen only at checkpoints.
*/
while ((sds = avl_first(&scn->scn_queue)) != NULL) {
dsl_dataset_t *ds;
uint64_t dsobj = sds->sds_dsobj;
uint64_t txg = sds->sds_txg;
/* dequeue and free the ds from the queue */
scan_ds_queue_remove(scn, dsobj);
sds = NULL;
/* set up min / max txg */
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (txg != 0) {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg, txg);
} else {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
}
scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
dsl_dataset_rele(ds, FTAG);
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/* No more objsets to fetch, we're done */
scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
ASSERT0(scn->scn_suspending);
}
static uint64_t
dsl_scan_count_data_disks(vdev_t *rvd)
{
uint64_t i, leaves = 0;
for (i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache)
continue;
leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd);
}
return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
int i;
uint64_t cur_size = 0;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
}
q->q_total_zio_size_this_txg += cur_size;
q->q_zios_this_txg++;
}
static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
uint64_t end)
{
q->q_total_seg_size_this_txg += end - start;
q->q_segs_this_txg++;
}
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
/* See comment in dsl_scan_check_suspend() */
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
(dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
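/*
 * A brief worked example of the predicate above, using hypothetical
 * numbers: suppose this is a scrub, mintime works out to 1000 ms, the
 * queue run has been going for 1200 ms this sync, and dp_dirty_total
 * sits at 35% of zfs_dirty_data_max while
 * zfs_vdev_async_write_active_min_dirty_percent is 30. Then both
 * NSEC2MSEC(scan_time_ns) > mintime and
 * dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent hold,
 * so the check returns B_TRUE and the queue run yields back to the
 * txg sync.
 */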
/*
* Given a list of scan_io_t's in io_list, this issues the I/Os out to
* disk. This consumes the io_list and frees the scan_io_t's. This is
* called when emptying queues, either when we're up against the memory
* limit or when we have finished scanning. Returns B_TRUE if we stopped
* processing the list before we finished. Any sios that were not issued
* will remain in the io_list.
*/
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
int64_t bytes_issued = 0;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;
if (scan_io_queue_check_suspend(scn)) {
suspended = B_TRUE;
break;
}
sio2bp(sio, &bp);
bytes_issued += SIO_GET_ASIZE(sio);
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
sio_free(sio);
}
atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);
return (suspended);
}
/*
* This function removes sios from an IO queue which reside within a given
* range_seg_t and inserts them (in offset order) into a list. Note that
* we only ever return a maximum of 32 sios at once. If there are more sios
* to process within this segment that did not make it onto the list, we
* return B_TRUE; otherwise we return B_FALSE.
*/
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
scan_io_t *srch_sio, *sio, *next_sio;
avl_index_t idx;
uint_t num_sios = 0;
int64_t bytes_issued = 0;
ASSERT(rs != NULL);
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
srch_sio = sio_alloc(1);
srch_sio->sio_nr_dvas = 1;
SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr));
/*
* The exact start of the extent might not contain any matching zios,
* so if that's the case, examine the next one in the tree.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr) && num_sios <= 32) {
ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs,
queue->q_exts_by_addr));
ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs,
queue->q_exts_by_addr));
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
bytes_issued += SIO_GET_ASIZE(sio);
num_sios++;
list_insert_tail(list, sio);
sio = next_sio;
}
/*
* We limit the number of sios we process at once to 32 to avoid
* biting off more than we can chew. If we didn't take everything
* in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely.
*/
if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs,
queue->q_exts_by_addr)) {
range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued);
range_tree_resize_segment(queue->q_exts_by_addr, rs,
SIO_GET_OFFSET(sio), rs_get_end(rs,
queue->q_exts_by_addr) - SIO_GET_OFFSET(sio));
return (B_TRUE);
} else {
uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr);
uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr);
range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart);
return (B_FALSE);
}
}
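/*
 * A small worked example of the partial-gather path above, with made-up
 * numbers: if the selected segment covers [0, 1 MiB) and the first 32
 * sios gathered span [0, 256 KiB), the segment's fill is reduced by the
 * bytes handed off and the segment itself is resized to start at the
 * offset of the first sio left behind (here 256 KiB), so the next pass
 * resumes exactly where this one stopped. Only when every sio in the
 * segment has been consumed is the segment removed from the range tree.
 */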
/*
* This is called from the queue emptying thread and selects the next
* extent from which we are to issue I/Os. The behavior of this function
* depends on the state of the scan, the current memory consumption and
* whether or not we are performing a scan shutdown.
* 1) We select extents in an elevator algorithm (LBA-order) if the scan
* needs to perform a checkpoint.
* 2) We select the largest available extent if we are up against the
* memory limit.
* 3) Otherwise we don't select any extents.
*/
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
range_tree_t *rt = queue->q_exts_by_addr;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
/* handle tunable overrides */
if (scn->scn_checkpointing || scn->scn_clearing) {
if (zfs_scan_issue_strategy == 1) {
return (range_tree_first(rt));
} else if (zfs_scan_issue_strategy == 2) {
/*
* We need to get the original entry in the by_addr
* tree so we can modify it.
*/
range_seg_t *size_rs =
zfs_btree_first(&queue->q_exts_by_size, NULL);
if (size_rs == NULL)
return (NULL);
uint64_t start = rs_get_start(size_rs, rt);
uint64_t size = rs_get_end(size_rs, rt) - start;
range_seg_t *addr_rs = range_tree_find(rt, start,
size);
ASSERT3P(addr_rs, !=, NULL);
ASSERT3U(rs_get_start(size_rs, rt), ==,
rs_get_start(addr_rs, rt));
ASSERT3U(rs_get_end(size_rs, rt), ==,
rs_get_end(addr_rs, rt));
return (addr_rs);
}
}
/*
* During normal clearing, we want to issue our largest segments
* first, keeping IO as sequential as possible, and leaving the
* smaller extents for later with the hope that they might eventually
* grow to larger sequential segments. However, when the scan is
* checkpointing, no new extents will be added to the sorting queue,
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
if (scn->scn_checkpointing) {
return (range_tree_first(rt));
} else if (scn->scn_clearing) {
/*
* We need to get the original entry in the by_addr
* tree so we can modify it.
*/
range_seg_t *size_rs = zfs_btree_first(&queue->q_exts_by_size,
NULL);
if (size_rs == NULL)
return (NULL);
uint64_t start = rs_get_start(size_rs, rt);
uint64_t size = rs_get_end(size_rs, rt) - start;
range_seg_t *addr_rs = range_tree_find(rt, start, size);
ASSERT3P(addr_rs, !=, NULL);
ASSERT3U(rs_get_start(size_rs, rt), ==, rs_get_start(addr_rs,
rt));
ASSERT3U(rs_get_end(size_rs, rt), ==, rs_get_end(addr_rs, rt));
return (addr_rs);
} else {
return (NULL);
}
}
static void
scan_io_queues_run_one(void *arg)
{
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
range_seg_t *rs = NULL;
scan_io_t *sio = NULL;
list_t sio_list;
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
mutex_enter(q_lock);
/* Calculate maximum in-flight bytes for this vdev. */
queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit *
(vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd)));
/* reset per-queue scan statistics for this txg */
queue->q_total_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_total_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
boolean_t more_left = B_TRUE;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
while (more_left) {
scan_io_t *first_sio, *last_sio;
/*
* We have selected which extent needs to be
* processed next. Gather up the corresponding sios.
*/
more_left = scan_io_queue_gather(queue, rs, &sio_list);
ASSERT(!list_is_empty(&sio_list));
first_sio = list_head(&sio_list);
last_sio = list_tail(&sio_list);
seg_end = SIO_GET_END_OFFSET(last_sio);
if (seg_start == 0)
seg_start = SIO_GET_OFFSET(first_sio);
/*
* Issuing sios can take a long time so drop the
* queue lock. The sio queue won't be updated by
* other threads since we're in syncing context so
* we can be sure that our trees will remain exactly
* as we left them.
*/
mutex_exit(q_lock);
suspended = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);
if (suspended)
break;
}
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
if (suspended)
break;
}
/*
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
while ((sio = list_head(&sio_list)) != NULL) {
list_remove(&sio_list, sio);
scan_io_queue_insert_impl(queue, sio);
}
mutex_exit(q_lock);
list_destroy(&sio_list);
}
/*
* Performs an emptying run on all scan queues in the pool. This just
* punches out one thread per top-level vdev, each of which processes
* only that vdev's scan queue. We can parallelize the I/O here because
* we know that each queue's I/Os only affect its own top-level vdev.
*
* This function waits for the queue runs to complete, and must be
* called from dsl_scan_sync (or in general, syncing context).
*/
static void
scan_io_queues_run(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (scn->scn_bytes_pending == 0)
return;
if (scn->scn_taskq == NULL) {
int nthreads = spa->spa_root_vdev->vdev_children;
/*
* We need to make this taskq *always* execute as many
* threads in parallel as we have top-level vdevs and no
* less, otherwise strange serialization of the calls to
* scan_io_queues_run_one can occur during spa_sync runs
* and that significantly impacts performance.
*/
scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
mutex_enter(&vd->vdev_scan_io_queue_lock);
if (vd->vdev_scan_io_queue != NULL) {
VERIFY(taskq_dispatch(scn->scn_taskq,
scan_io_queues_run_one, vd->vdev_scan_io_queue,
TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* Wait for the queues to finish issuing their IOs for this run
* before we return. There may still be IOs in flight at this
* point.
*/
taskq_wait(scn->scn_taskq);
}
static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
uint64_t elapsed_nanosecs;
if (zfs_recover)
return (B_FALSE);
if (zfs_async_block_max_blocks != 0 &&
scn->scn_visited_this_txg >= zfs_async_block_max_blocks) {
return (B_TRUE);
}
if (zfs_max_async_dedup_frees != 0 &&
scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) {
return (B_TRUE);
}
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
(NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
txg_sync_waiting(scn->scn_dp)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_scan_t *scn = arg;
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
dmu_tx_get_txg(tx), bp, 0));
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
scn->scn_visited_this_txg++;
if (BP_GET_DEDUP(bp))
scn->scn_dedup_frees_this_txg++;
return (0);
}
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t i;
uint64_t seg_size_total = 0, zio_size_total = 0;
uint64_t seg_count_total = 0, zio_count_total = 0;
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
if (queue == NULL)
continue;
seg_size_total += queue->q_total_seg_size_this_txg;
zio_size_total += queue->q_total_zio_size_this_txg;
seg_count_total += queue->q_segs_this_txg;
zio_count_total += queue->q_zios_this_txg;
}
if (seg_count_total == 0 || zio_count_total == 0) {
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_zios_this_txg = 0;
return;
}
scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
scn->scn_segs_this_txg = seg_count_total;
scn->scn_zios_this_txg = zio_count_total;
}
static int
bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (dsl_scan_free_block_cb(arg, bp, tx));
}
static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
dsl_scan_t *scn = arg;
const dva_t *dva = &bp->blk_dva[0];
if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
DVA_GET_ASIZE(dva), tx);
scn->scn_visited_this_txg++;
return (0);
}
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t used = 0, comp, uncomp;
boolean_t clones_left;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
(scn->scn_async_destroying && !scn->scn_async_stalled))
return (B_TRUE);
if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
&used, &comp, &uncomp);
}
clones_left = spa_livelist_delete_check(spa);
return ((used != 0) || (clones_left));
}
static boolean_t
dsl_scan_check_deferred(vdev_t *vd)
{
boolean_t need_resilver = B_FALSE;
for (int c = 0; c < vd->vdev_children; c++) {
need_resilver |=
dsl_scan_check_deferred(vd->vdev_child[c]);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (need_resilver);
if (!vd->vdev_resilver_deferred)
need_resilver = B_TRUE;
return (need_resilver);
}
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_t *vd;
vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops == &vdev_indirect_ops) {
/*
* The indirect vdev can point to multiple
* vdevs. For simplicity, always create
* the resilver zio_t. zio_vdev_io_start()
* will bypass the child resilver i/o's if
* they are on vdevs that don't have DTL's.
*/
return (B_TRUE);
}
if (DVA_GET_GANG(dva)) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
return (B_TRUE);
}
/*
* Check if the top-level vdev must resilver this offset.
* When the offset does not intersect with a dirty leaf DTL
* then it may be possible to skip the resilver IO. The psize
* is provided instead of asize to simplify the check for RAIDZ.
*/
if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth))
return (B_FALSE);
/*
* Check that this top-level vdev has a device under it which
* is resilvering and is not deferred.
*/
if (!dsl_scan_check_deferred(vd))
return (B_FALSE);
return (B_TRUE);
}
static int
dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
int err = 0;
if (spa_suspend_async_destroy(spa))
return (0);
if (zfs_free_bpobj_enabled &&
spa_version(spa) >= SPA_VERSION_DEADLISTS) {
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bpobj_iterate(&dp->dp_free_bpobj,
bpobj_dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
}
if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
ASSERT(scn->scn_async_destroying);
scn->scn_is_bptree = B_TRUE;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bptree_iterate(dp->dp_meta_objset,
dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err == EIO || err == ECKSUM) {
err = 0;
} else if (err != 0 && err != ERESTART) {
zfs_panic_recover("error %u from "
"traverse_dataset_destroyed()", err);
}
if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
/* finished; deactivate async destroy feature */
spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
ASSERT(!spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY));
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, tx));
VERIFY0(bptree_free(dp->dp_meta_objset,
dp->dp_bptree_obj, tx));
dp->dp_bptree_obj = 0;
scn->scn_async_destroying = B_FALSE;
scn->scn_async_stalled = B_FALSE;
} else {
/*
* If we didn't make progress, mark the async
* destroy as stalled, so that we will not initiate
* a spa_sync() on its behalf. Note that we only
* check this if we are not finished, because if the
* bptree had no blocks for us to visit, we can
* finish without "making progress".
*/
scn->scn_async_stalled =
(scn->scn_visited_this_txg == 0);
}
}
if (scn->scn_visited_this_txg) {
zfs_dbgmsg("freed %llu blocks in %llums from "
"free_bpobj/bptree txg %llu; err=%u",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)
NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
(longlong_t)tx->tx_txg, err);
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
/*
* Write out changes to the DDT that may be required as a
* result of the blocks freed. This ensures that the DDT
* is clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
}
if (err != 0)
return (err);
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
zfs_free_leak_on_eio &&
(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
/*
* We have finished background destroying, but there is still
* some space left in the dp_free_dir. Transfer this leaked
* space to the dp_leak_dir.
*/
if (dp->dp_leak_dir == NULL) {
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
LEAK_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
LEAK_DIR_NAME, &dp->dp_leak_dir));
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
-dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
}
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
!spa_livelist_delete_check(spa)) {
/* finished; verify that space accounting went to zero */
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
spa_notify_waiters(spa);
EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ));
if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_OBSOLETE_COUNTS));
scn->scn_is_bptree = B_FALSE;
scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
err = bpobj_iterate(&dp->dp_obsolete_bpobj,
dsl_scan_obsolete_block_cb, scn, tx);
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
dsl_pool_destroy_obsolete_bpobj(dp, tx);
}
return (0);
}
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
* can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
*/
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
int err = 0;
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
state_sync_type_t sync_type = SYNC_OPTIONAL;
if (spa->spa_resilver_deferred &&
!spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
* imported (see dsl_scan_init). We also restart scans if there
* is a deferred resilver and the user has manually disabled
* deferred resilvers via the tunable.
*/
if (dsl_scan_restarting(scn, tx) ||
(spa->spa_resilver_deferred && zfs_resilver_disable_defer)) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u txg=%llu",
func, (longlong_t)tx->tx_txg);
dsl_scan_setup_sync(&func, tx);
}
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
/*
* If the scan is inactive due to a stalled async destroy, try again.
*/
if (!scn->scn_async_stalled && !dsl_scan_active(scn))
return;
/* reset scan statistics */
scn->scn_visited_this_txg = 0;
scn->scn_dedup_frees_this_txg = 0;
scn->scn_holes_this_txg = 0;
scn->scn_lt_min_this_txg = 0;
scn->scn_gt_max_this_txg = 0;
scn->scn_ddt_contained_this_txg = 0;
scn->scn_objsets_visited_this_txg = 0;
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_zios_this_txg = 0;
scn->scn_suspending = B_FALSE;
scn->scn_sync_start_time = gethrtime();
spa->spa_scrub_active = B_TRUE;
/*
* First process the async destroys. If we suspend, don't do
* any scrubbing or resilvering. This ensures that there are no
* async destroys while we are scanning, so the scan code doesn't
* have to worry about traversing it. It is also faster to free the
* blocks than to scrub them.
*/
err = dsl_process_async_destroys(dp, tx);
if (err != 0)
return;
if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
return;
/*
* Wait a few txgs after importing to begin scanning so that
* we can get the pool imported quickly.
*/
if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
return;
/*
* zfs_scan_suspend_progress can be set to disable scan progress.
* We don't want to spin the txg_sync thread, so we add a delay
* here to simulate the time spent doing a scan. This is mostly
* useful for testing and debugging.
*/
if (zfs_scan_suspend_progress) {
uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
while (zfs_scan_suspend_progress &&
!txg_sync_waiting(scn->scn_dp) &&
!spa_shutting_down(scn->scn_dp->dp_spa) &&
NSEC2MSEC(scan_time_ns) < mintime) {
delay(hz);
scan_time_ns = gethrtime() - scn->scn_sync_start_time;
}
return;
}
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
*/
if (!zfs_scan_legacy) {
scn->scn_is_sorted = B_TRUE;
if (scn->scn_last_checkpoint == 0)
scn->scn_last_checkpoint = ddi_get_lbolt();
}
/*
* For sorted scans, determine what kind of work we will be doing
* this txg based on our memory limitations and whether or not we
* need to perform a checkpoint.
*/
if (scn->scn_is_sorted) {
/*
* If we are over our checkpoint interval, set scn_clearing
* so that we can begin checkpointing immediately. The
* checkpoint allows us to save a consistent bookmark
* representing how much data we have scrubbed so far.
* Otherwise, use the memory limit to determine if we should
* scan for metadata or start issuing scrub IOs. We accumulate
* metadata until we hit our hard memory limit at which point
* we issue scrub IOs until we are at our soft memory limit.
*/
if (scn->scn_checkpointing ||
ddi_get_lbolt() - scn->scn_last_checkpoint >
SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
if (!scn->scn_checkpointing)
zfs_dbgmsg("begin scan checkpoint");
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
} else {
boolean_t should_clear = dsl_scan_should_clear(scn);
if (should_clear && !scn->scn_clearing) {
zfs_dbgmsg("begin scan clearing");
scn->scn_clearing = B_TRUE;
} else if (!should_clear && scn->scn_clearing) {
zfs_dbgmsg("finish scan clearing");
scn->scn_clearing = B_FALSE;
}
}
} else {
ASSERT0(scn->scn_checkpointing);
ASSERT0(scn->scn_clearing);
}
if (!scn->scn_clearing && scn->scn_done_txg == 0) {
/* Need to scan metadata for more blocks to scrub */
dsl_scan_phys_t *scnp = &scn->scn_phys;
taskqid_t prefetch_tqid;
/*
* Recalculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB). Limits for the issuing
* phase are done per top-level vdev and are handled separately.
*/
scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
dsl_scan_count_data_disks(spa->spa_root_vdev), 1ULL << 20);
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
zfs_dbgmsg("doing scan sync txg %llu; "
"ddt bm=%llu/%llu/%llu/%llx",
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
} else {
zfs_dbgmsg("doing scan sync txg %llu; "
"bm=%llu/%llu/%llu/%llu",
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_bookmark.zb_objset,
(longlong_t)scnp->scn_bookmark.zb_object,
(longlong_t)scnp->scn_bookmark.zb_level,
(longlong_t)scnp->scn_bookmark.zb_blkid);
}
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scn->scn_prefetch_stop = B_FALSE;
prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
dsl_scan_prefetch_thread, scn, TQ_SLEEP);
ASSERT(prefetch_tqid != TASKQID_INVALID);
dsl_pool_config_enter(dp, FTAG);
dsl_scan_visit(scn, tx);
dsl_pool_config_exit(dp, FTAG);
mutex_enter(&dp->dp_spa->spa_scrub_lock);
scn->scn_prefetch_stop = B_TRUE;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&dp->dp_spa->spa_scrub_lock);
taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
zfs_dbgmsg("scan visited %llu blocks in %llums "
"(%llu os's, %llu holes, %llu < mintxg, "
"%llu in ddt, %llu > maxtxg)",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_objsets_visited_this_txg,
(longlong_t)scn->scn_holes_this_txg,
(longlong_t)scn->scn_lt_min_this_txg,
(longlong_t)scn->scn_ddt_contained_this_txg,
(longlong_t)scn->scn_gt_max_this_txg);
if (!scn->scn_suspending) {
ASSERT0(avl_numnodes(&scn->scn_queue));
scn->scn_done_txg = tx->tx_txg + 1;
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
}
zfs_dbgmsg("scan complete txg %llu",
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
ASSERT(scn->scn_clearing);
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scan_io_queues_run(scn);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
/* calculate and dprintf the current memory usage */
(void) dsl_scan_should_clear(scn);
dsl_scan_update_stats(scn);
zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums "
"(avg_block_size = %llu, avg_seg_size = %llu)",
(longlong_t)scn->scn_zios_this_txg,
(longlong_t)scn->scn_segs_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_avg_zio_size_this_txg,
(longlong_t)scn->scn_avg_seg_size_this_txg);
} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
/* Finished with everything. Mark the scrub as complete */
zfs_dbgmsg("scan issuing complete txg %llu",
(longlong_t)tx->tx_txg);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
ASSERT0(scn->scn_bytes_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
dsl_scan_sync_state(scn, tx, sync_type);
}
static void
count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
int i;
/*
* Don't count embedded bp's, since we already did the work of
* scanning these when we scanned the containing block.
*/
if (BP_IS_EMBEDDED(bp))
return;
/*
* Update the spa's stats on how many bytes we have issued.
* Sequential scrubs create a zio for each DVA of the bp. Each
* of these will include all DVAs for repair purposes, but the
* zio code will only try the first one unless there is an issue.
* Therefore, we should only count the first DVA for these IOs.
*/
if (scn->scn_is_sorted) {
atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
DVA_GET_ASIZE(&bp->blk_dva[0]));
} else {
spa_t *spa = scn->scn_dp->dp_spa;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
atomic_add_64(&spa->spa_scan_pass_issued,
DVA_GET_ASIZE(&bp->blk_dva[i]));
}
}
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
if (zab == NULL)
return;
mutex_enter(&zab->zab_lock);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_2_of_2_samevdev++;
break;
case 3:
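/*
 * With three DVAs, the sum of the three pairwise comparisons below can
 * only be 0, 1, or 3 (two of the pairs cannot match without the third
 * matching as well), so equal == 1 means exactly two DVAs share a vdev
 * and equal == 3 means all three do.
 */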
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal == 1)
zb->zb_ditto_2_of_3_samevdev++;
else if (equal == 3)
zb->zb_ditto_3_of_3_samevdev++;
break;
}
}
mutex_exit(&zab->zab_lock);
}
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
int64_t asize = SIO_GET_ASIZE(sio);
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
atomic_add_64(&scn->scn_bytes_pending, -asize);
sio_free(sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
queue->q_sio_memused += SIO_GET_MUSED(sio);
range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize);
}
/*
* Given all the info we got from our metadata scanning process, we
* construct a scan_io_t and insert it into the scan sorting queue. The
* I/O must already be suitable for us to process. This is controlled
* by dsl_scan_enqueue().
*/
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp));
ASSERT0(BP_IS_GANG(bp));
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
bp2sio(bp, sio, dva_i);
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
/*
* Increment the bytes pending counter now so that we can't
* get an integer underflow in case the worker processes the
* zio before we get to incrementing this counter.
*/
atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio));
scan_io_queue_insert_impl(queue, sio);
}
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
* or immediately executes the I/O.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb)
{
spa_t *spa = dp->dp_spa;
ASSERT(!BP_IS_EMBEDDED(bp));
/*
* Gang blocks are hard to issue sequentially, so we just issue them
* here immediately instead of queuing them.
*/
if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
scan_exec_io(dp, bp, zio_flags, zb, NULL);
return;
}
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
dva_t dva;
vdev_t *vdev;
dva = bp->blk_dva[i];
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
ASSERT(vdev != NULL);
mutex_enter(&vdev->vdev_scan_io_queue_lock);
if (vdev->vdev_scan_io_queue == NULL)
vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
ASSERT(dp->dp_scan != NULL);
scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
i, zio_flags, zb);
mutex_exit(&vdev->vdev_scan_io_queue_lock);
}
}
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
size_t psize = BP_GET_PSIZE(bp);
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg) {
count_block(scn, dp->dp_blkstats, bp);
return (0);
}
/* Embedded BP's have phys_birth==0, so we reject them above. */
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
zio_flags |= ZIO_FLAG_SCRUB;
needs_io = B_TRUE;
} else {
ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
zio_flags |= ZIO_FLAG_RESILVER;
needs_io = B_FALSE;
}
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d];
/*
* Keep track of how much data we've examined so that
* zpool(8) status can make useful progress reports.
*/
scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
needs_io = dsl_scan_need_resilver(spa, dva, psize,
phys_birth);
}
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
count_block(scn, dp->dp_blkstats, bp);
}
/* do not relocate this block */
return (0);
}
static void
dsl_scan_scrub_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
dsl_scan_io_queue_t *queue = zio->io_private;
abd_free(zio->io_abd);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
} else {
mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&queue->q_zio_cv);
mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
}
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
}
}
/*
* Given a scanning zio's information, executes the zio. The zio need
* not necessarily be sortable; this function simply executes the
* zio, no matter what it is. The optional queue argument allows the
* caller to request per-top-level-vdev IO rate limiting instead of
* the legacy global limiting.
*/
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
if (queue == NULL) {
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
ASSERT3U(queue->q_maxinflight_bytes, >, 0);
mutex_enter(q_lock);
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
mutex_exit(q_lock);
}
count_block(scn, dp->dp_blkstats, bp);
zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size,
dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
/*
* This is the primary extent sorting algorithm. We balance two parameters:
* 1) how many bytes of I/O are in an extent
* 2) how well the extent is filled with I/O (as a fraction of its total size)
* Since we allow extents to have gaps between their constituent I/Os, it's
* possible to have a fairly large extent that contains the same amount of
* I/O bytes as a much smaller extent, which just packs the I/O more tightly.
* The algorithm sorts based on a score calculated from the extent's size,
* the relative fill volume (in %) and a "fill weight" parameter that controls
* the split between whether we prefer larger extents or more well populated
* extents:
*
* SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
*
* Example:
* 1) assume extsz = 64 MiB
* 2) assume fill = 32 MiB (extent is half full)
* 3) assume fill_weight = 3
* 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
* SCORE = 32M + (50 * 3 * 32M) / 100
* SCORE = 32M + (4800M / 100)
* SCORE = 32M + 48M
* ^ ^
* | +--- final total relative fill-based score
* +--------- final total fill-based score
* SCORE = 80M
*
* As can be seen, at fill_weight=3, the algorithm is slightly biased towards
* extents that are more completely filled (in a 3:2 ratio) vs just larger.
* Note that as an optimization, we replace multiplication and division by
* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
*/
static int
ext_size_compare(const void *x, const void *y)
{
const range_seg_gap_t *rsa = x, *rsb = y;
uint64_t sa = rsa->rs_end - rsa->rs_start;
uint64_t sb = rsb->rs_end - rsb->rs_start;
uint64_t score_a, score_b;
score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
fill_weight * rsa->rs_fill) >> 7);
score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
fill_weight * rsb->rs_fill) >> 7);
if (score_a > score_b)
return (-1);
if (score_a == score_b) {
if (rsa->rs_start < rsb->rs_start)
return (-1);
if (rsa->rs_start == rsb->rs_start)
return (0);
return (1);
}
return (1);
}
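/*
 * Illustrative sketch (not part of the upstream file): the score formula
 * from the comment above written out as a stand-alone helper so the
 * shift-based percentage math is easy to follow. The function name and
 * parameters are hypothetical.
 */
static uint64_t
ext_score_example(uint64_t fill, uint64_t size, uint64_t weight)
{
	/*
	 * fill + (fill% * fill * weight), with the *100 and /100 replaced
	 * by the <<7 / >>7 shifts used in ext_size_compare() above.
	 */
	return (fill + ((((fill << 7) / size) * weight * fill) >> 7));
}
/*
 * With fill = 32M, size = 64M and weight = 3 this returns 32M + 48M = 80M,
 * matching the worked example above.
 */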
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
*/
static int
sio_addr_compare(const void *x, const void *y)
{
const scan_io_t *a = x, *b = y;
return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b)));
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
q->q_scn = scn;
q->q_vd = vd;
q->q_sio_memused = 0;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
q->q_exts_by_addr = range_tree_create_impl(&rt_btree_ops, RANGE_SEG_GAP,
&q->q_exts_by_size, 0, 0, ext_size_compare, zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
return (q);
}
/*
* Destroys a scan queue and all segments and scan_io_t's contained in it.
 * No further execution of I/O occurs; anything pending in the queue is
* simply freed without being executed.
*/
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
int64_t bytes_dequeued = 0;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr,
SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio)));
bytes_dequeued += SIO_GET_ASIZE(sio);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
sio_free(sio);
}
ASSERT0(queue->q_sio_memused);
atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv);
kmem_free(queue, sizeof (*queue));
}
/*
 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
* called on behalf of vdev_top_transfer when creating or destroying
* a mirror vdev due to zpool attach/detach.
*/
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
if (tvd->vdev_scan_io_queue != NULL)
tvd->vdev_scan_io_queue->q_vd = tvd;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
mutex_enter(&tvd->vdev_scan_io_queue_lock);
if (tvd->vdev_scan_io_queue != NULL)
dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = NULL;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
vdev_t *vdev;
kmutex_t *q_lock;
dsl_scan_io_queue_t *queue;
scan_io_t *srch_sio, *sio;
avl_index_t idx;
uint64_t start, size;
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
ASSERT(vdev != NULL);
q_lock = &vdev->vdev_scan_io_queue_lock;
queue = vdev->vdev_scan_io_queue;
mutex_enter(q_lock);
if (queue == NULL) {
mutex_exit(q_lock);
return;
}
srch_sio = sio_alloc(BP_GET_NDVAS(bp));
bp2sio(bp, srch_sio, dva_i);
start = SIO_GET_OFFSET(srch_sio);
size = SIO_GET_ASIZE(srch_sio);
/*
* We can find the zio in two states:
* 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement
* its data volume from the containing range_seg_t and
* resort the q_exts_by_size tree to reflect that the
* range_seg_t has lost some of its 'fill'. We don't shorten
* the range_seg_t - this is usually rare enough not to be
 * worth the extra hassle of trying to keep track of precise
* extent boundaries.
* 2) Hot, where the zio is currently in-flight in
* dsl_scan_issue_ios. In this case, we can't simply
* reach in and stop the in-flight zio's, so we instead
* block the caller. Eventually, dsl_scan_issue_ios will
* be done with issuing the zio's it gathered and will
* signal us.
*/
sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx);
sio_free(srch_sio);
if (sio != NULL) {
int64_t asize = SIO_GET_ASIZE(sio);
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, SIO_GET_OFFSET(sio));
ASSERT3U(size, ==, asize);
avl_remove(&queue->q_sios_by_addr, sio);
queue->q_sio_memused -= SIO_GET_MUSED(sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/*
* We only update scn_bytes_pending in the cold path,
* otherwise it will already have been accounted for as
* part of the zio's execution.
*/
atomic_add_64(&scn->scn_bytes_pending, -asize);
/* count the block as though we issued it */
sio2bp(sio, &tmpbp);
count_block(scn, dp->dp_blkstats, &tmpbp);
sio_free(sio);
}
mutex_exit(q_lock);
}
/*
* Callback invoked when a zio_free() zio is executing. This needs to be
* intercepted to prevent the zio from deallocating a particular portion
 * of disk space that then gets reallocated and written to while we
* still have it queued up for processing.
*/
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(scn != NULL);
if (!dsl_scan_is_running(scn))
return;
for (int i = 0; i < BP_GET_NDVAS(bp); i++)
dsl_scan_freed_dva(spa, bp, i);
}
/*
* Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has
* not started, start it. Otherwise, only restart if max txg in DTL range is
* greater than the max txg in the current scan. If the DTL max is less than
* the scan max, then the vdev has not missed any new data since the resilver
* started, so a restart is not needed.
*/
void
dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd)
{
uint64_t min, max;
if (!vdev_resilver_needed(vd, &min, &max))
return;
if (!dsl_scan_resilvering(dp)) {
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
return;
}
if (max <= dp->dp_scan->scn_phys.scn_max_txg)
return;
/* restart is needed, check if it can be deferred */
if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER))
vdev_defer_resilver(vd);
else
spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW,
"Min millisecs to scrub per txg");
ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW,
"Min millisecs to obsolete per txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW,
"Min millisecs to free per txg");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW,
"Min millisecs to resilver per txg");
ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW,
"Set to prevent scans from progressing");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW,
"Set to disable scrub I/O");
ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW,
"Set to disable scrub prefetching");
ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW,
"Max number of blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW,
"Max number of dedup blocks freed in one txg");
ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW,
"Enable processing of the free_bpobj");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW,
"Fraction of RAM for scan hard limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW,
"IO issuing strategy during scrubbing. "
"0 = default, 1 = LBA, 2 = size");
ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW,
"Scrub using legacy non-sequential method");
ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW,
"Scan progress on-disk checkpointing interval");
ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW,
"Max gap in bytes between sequential scrub / resilver I/Os");
ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW,
"Fraction of hard limit used as soft limit");
ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW,
"Tunable to attempt to reduce lock contention");
ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW,
"Tunable to adjust bias towards more filled segments during scans");
ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW,
"Process all resilvers immediately");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dsl_synctask.c b/sys/contrib/openzfs/module/zfs/dsl_synctask.c
index 148e8fff2437..9fc9d4011d11 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_synctask.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_synctask.c
@@ -1,257 +1,257 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/metaslab.h>
#define DST_AVG_BLKSHIFT 14
-/* ARGSUSED */
static int
dsl_null_checkfunc(void *arg, dmu_tx_t *tx)
{
+ (void) arg, (void) tx;
return (0);
}
static int
dsl_sync_task_common(const char *pool, dsl_checkfunc_t *checkfunc,
dsl_syncfunc_t *syncfunc, dsl_sigfunc_t *sigfunc, void *arg,
int blocks_modified, zfs_space_check_t space_check, boolean_t early)
{
spa_t *spa;
dmu_tx_t *tx;
int err;
dsl_sync_task_t dst = { { { NULL } } };
dsl_pool_t *dp;
err = spa_open(pool, &spa, FTAG);
if (err != 0)
return (err);
dp = spa_get_dsl(spa);
top:
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dst.dst_pool = dp;
dst.dst_txg = dmu_tx_get_txg(tx);
dst.dst_space = blocks_modified << DST_AVG_BLKSHIFT;
dst.dst_space_check = space_check;
dst.dst_checkfunc = checkfunc != NULL ? checkfunc : dsl_null_checkfunc;
dst.dst_syncfunc = syncfunc;
dst.dst_arg = arg;
dst.dst_error = 0;
dst.dst_nowaiter = B_FALSE;
dsl_pool_config_enter(dp, FTAG);
err = dst.dst_checkfunc(arg, tx);
dsl_pool_config_exit(dp, FTAG);
if (err != 0) {
dmu_tx_commit(tx);
spa_close(spa, FTAG);
return (err);
}
txg_list_t *task_list = (early) ?
&dp->dp_early_sync_tasks : &dp->dp_sync_tasks;
VERIFY(txg_list_add_tail(task_list, &dst, dst.dst_txg));
dmu_tx_commit(tx);
if (sigfunc != NULL && txg_wait_synced_sig(dp, dst.dst_txg)) {
/* current contract is to call func once */
sigfunc(arg, tx);
sigfunc = NULL; /* in case we're performing an EAGAIN retry */
}
txg_wait_synced(dp, dst.dst_txg);
if (dst.dst_error == EAGAIN) {
txg_wait_synced(dp, dst.dst_txg + TXG_DEFER_SIZE);
goto top;
}
spa_close(spa, FTAG);
return (dst.dst_error);
}
/*
* Called from open context to perform a callback in syncing context. Waits
* for the operation to complete.
*
* The checkfunc will be called from open context as a preliminary check
* which can quickly fail. If it succeeds, it will be called again from
* syncing context. The checkfunc should generally be designed to work
* properly in either context, but if necessary it can check
* dmu_tx_is_syncing(tx).
*
* The synctask infrastructure enforces proper locking strategy with respect
* to the dp_config_rwlock -- the lock will always be held when the callbacks
* are called. It will be held for read during the open-context (preliminary)
* call to the checkfunc, and then held for write from syncing context during
* the calls to the check and sync funcs.
*
* A dataset or pool name can be passed as the first argument. Typically,
* the check func will hold, check the return value of the hold, and then
 * release the dataset. The sync func will VERIFY0(hold()) the dataset.
* This is safe because no changes can be made between the check and sync funcs,
* and the sync func will only be called if the check func successfully opened
* the dataset.
*/
int
dsl_sync_task(const char *pool, dsl_checkfunc_t *checkfunc,
dsl_syncfunc_t *syncfunc, void *arg,
int blocks_modified, zfs_space_check_t space_check)
{
return (dsl_sync_task_common(pool, checkfunc, syncfunc, NULL, arg,
blocks_modified, space_check, B_FALSE));
}
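/*
 * Illustrative usage sketch (not part of the upstream file): the typical
 * checkfunc/syncfunc pairing described above. The my_task_* names and
 * my_task_arg_t structure are hypothetical, and the sketch assumes
 * sys/dsl_dataset.h is available; only the calling convention comes from
 * the comment above.
 */
typedef struct my_task_arg {
	const char *mta_dsname;
	uint64_t mta_value;
} my_task_arg_t;

static int
my_task_check(void *arg, dmu_tx_t *tx)
{
	my_task_arg_t *mta = arg;
	dsl_dataset_t *ds;
	int error;

	/* Runs once from open context, then again from syncing context. */
	error = dsl_dataset_hold(dmu_tx_pool(tx), mta->mta_dsname, FTAG, &ds);
	if (error != 0)
		return (error);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
my_task_sync(void *arg, dmu_tx_t *tx)
{
	my_task_arg_t *mta = arg;
	dsl_dataset_t *ds;

	/* Safe to VERIFY0: the checkfunc already succeeded in this txg. */
	VERIFY0(dsl_dataset_hold(dmu_tx_pool(tx), mta->mta_dsname, FTAG, &ds));
	/* ... apply mta->mta_value to the dataset here ... */
	dsl_dataset_rele(ds, FTAG);
}
/*
 * A caller would then invoke, for example:
 *   error = dsl_sync_task("tank", my_task_check, my_task_sync,
 *       &arg, 1, ZFS_SPACE_CHECK_NORMAL);
 */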
/*
* An early synctask works exactly as a standard synctask with one important
* difference on the way it is handled during syncing context. Standard
* synctasks run after we've written out all the dirty blocks of dirty
* datasets. Early synctasks are executed before writing out any dirty data,
* and thus before standard synctasks.
*
* For that reason, early synctasks can affect the process of writing dirty
* changes to disk for the txg that they run and should be used with caution.
* In addition, early synctasks should not dirty any metaslabs as this would
* invalidate the precondition/invariant for subsequent early synctasks.
* [see dsl_pool_sync() and dsl_early_sync_task_verify()]
*/
int
dsl_early_sync_task(const char *pool, dsl_checkfunc_t *checkfunc,
dsl_syncfunc_t *syncfunc, void *arg,
int blocks_modified, zfs_space_check_t space_check)
{
return (dsl_sync_task_common(pool, checkfunc, syncfunc, NULL, arg,
blocks_modified, space_check, B_TRUE));
}
/*
 * A standard synctask that can be interrupted by a signal. The sigfunc
* is called once if a signal occurred while waiting for the task to sync.
*/
int
dsl_sync_task_sig(const char *pool, dsl_checkfunc_t *checkfunc,
dsl_syncfunc_t *syncfunc, dsl_sigfunc_t *sigfunc, void *arg,
int blocks_modified, zfs_space_check_t space_check)
{
return (dsl_sync_task_common(pool, checkfunc, syncfunc, sigfunc, arg,
blocks_modified, space_check, B_FALSE));
}
static void
dsl_sync_task_nowait_common(dsl_pool_t *dp, dsl_syncfunc_t *syncfunc, void *arg,
dmu_tx_t *tx, boolean_t early)
{
dsl_sync_task_t *dst = kmem_zalloc(sizeof (*dst), KM_SLEEP);
dst->dst_pool = dp;
dst->dst_txg = dmu_tx_get_txg(tx);
dst->dst_space_check = ZFS_SPACE_CHECK_NONE;
dst->dst_checkfunc = dsl_null_checkfunc;
dst->dst_syncfunc = syncfunc;
dst->dst_arg = arg;
dst->dst_error = 0;
dst->dst_nowaiter = B_TRUE;
txg_list_t *task_list = (early) ?
&dp->dp_early_sync_tasks : &dp->dp_sync_tasks;
VERIFY(txg_list_add_tail(task_list, dst, dst->dst_txg));
}
void
dsl_sync_task_nowait(dsl_pool_t *dp, dsl_syncfunc_t *syncfunc, void *arg,
dmu_tx_t *tx)
{
dsl_sync_task_nowait_common(dp, syncfunc, arg, tx, B_FALSE);
}
void
dsl_early_sync_task_nowait(dsl_pool_t *dp, dsl_syncfunc_t *syncfunc, void *arg,
dmu_tx_t *tx)
{
dsl_sync_task_nowait_common(dp, syncfunc, arg, tx, B_TRUE);
}
/*
* Called in syncing context to execute the synctask.
*/
void
dsl_sync_task_sync(dsl_sync_task_t *dst, dmu_tx_t *tx)
{
dsl_pool_t *dp = dst->dst_pool;
ASSERT0(dst->dst_error);
/*
* Check for sufficient space.
*
* When the sync task was created, the caller specified the
* type of space checking required. See the comment in
* zfs_space_check_t for details on the semantics of each
* type of space checking.
*
* We just check against what's on-disk; we don't want any
* in-flight accounting to get in our way, because open context
* may have already used up various in-core limits
* (arc_tempreserve, dsl_pool_tempreserve).
*/
if (dst->dst_space_check != ZFS_SPACE_CHECK_NONE) {
uint64_t quota = dsl_pool_unreserved_space(dp,
dst->dst_space_check);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
/* MOS space is triple-dittoed, so we multiply by 3. */
if (used + dst->dst_space * 3 > quota) {
dst->dst_error = SET_ERROR(ENOSPC);
if (dst->dst_nowaiter)
kmem_free(dst, sizeof (*dst));
return;
}
}
/*
* Check for errors by calling checkfunc.
*/
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
dst->dst_error = dst->dst_checkfunc(dst->dst_arg, tx);
if (dst->dst_error == 0)
dst->dst_syncfunc(dst->dst_arg, tx);
rrw_exit(&dp->dp_config_rwlock, FTAG);
if (dst->dst_nowaiter)
kmem_free(dst, sizeof (*dst));
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_sync_task);
EXPORT_SYMBOL(dsl_sync_task_nowait);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/fm.c b/sys/contrib/openzfs/module/zfs/fm.c
index b8a1c7c8a5ca..7981568b8df7 100644
--- a/sys/contrib/openzfs/module/zfs/fm.c
+++ b/sys/contrib/openzfs/module/zfs/fm.c
@@ -1,1372 +1,1372 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* Fault Management Architecture (FMA) Resource and Protocol Support
*
* The routines contained herein provide services to support kernel subsystems
* in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
*
* Name-Value Pair Lists
*
* The embodiment of an FMA protocol element (event, fmri or authority) is a
* name-value pair list (nvlist_t). FMA-specific nvlist constructor and
* destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
* to create an nvpair list using custom allocators. Callers may choose to
* allocate either from the kernel memory allocator, or from a preallocated
* buffer, useful in constrained contexts like high-level interrupt routines.
*
* Protocol Event and FMRI Construction
*
* Convenience routines are provided to construct nvlist events according to
* the FMA Event Protocol and Naming Schema specification for ereports and
* FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
*
* ENA Manipulation
*
* Routines to generate ENA formats 0, 1 and 2 are available as well as
* routines to increment formats 1 and 2. Individual fields within the
* ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
* fm_ena_format_get() and fm_ena_gen_get().
*/
#include <sys/types.h>
#include <sys/time.h>
#include <sys/list.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/kstat.h>
#include <sys/zfs_context.h>
#ifdef _KERNEL
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/zfs_ioctl.h>
int zfs_zevent_len_max = 512;
static int zevent_len_cur = 0;
static int zevent_waiters = 0;
static int zevent_flags = 0;
/* Num events rate limited since the last time zfs_zevent_next() was called */
static uint64_t ratelimit_dropped = 0;
/*
* The EID (Event IDentifier) is used to uniquely tag a zevent when it is
* posted. The posted EIDs are monotonically increasing but not persistent.
* They will be reset to the initial value (1) each time the kernel module is
* loaded.
*/
static uint64_t zevent_eid = 0;
static kmutex_t zevent_lock;
static list_t zevent_list;
static kcondvar_t zevent_cv;
#endif /* _KERNEL */
/*
* Common fault management kstats to record event generation failures
*/
struct erpt_kstat {
kstat_named_t erpt_dropped; /* num erpts dropped on post */
kstat_named_t erpt_set_failed; /* num erpt set failures */
kstat_named_t fmri_set_failed; /* num fmri set failures */
kstat_named_t payload_set_failed; /* num payload set failures */
kstat_named_t erpt_duplicates; /* num duplicate erpts */
};
static struct erpt_kstat erpt_kstat_data = {
{ "erpt-dropped", KSTAT_DATA_UINT64 },
{ "erpt-set-failed", KSTAT_DATA_UINT64 },
{ "fmri-set-failed", KSTAT_DATA_UINT64 },
{ "payload-set-failed", KSTAT_DATA_UINT64 },
{ "erpt-duplicates", KSTAT_DATA_UINT64 }
};
kstat_t *fm_ksp;
#ifdef _KERNEL
static zevent_t *
zfs_zevent_alloc(void)
{
zevent_t *ev;
ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);
list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
offsetof(zfs_zevent_t, ze_node));
list_link_init(&ev->ev_node);
return (ev);
}
static void
zfs_zevent_free(zevent_t *ev)
{
/* Run provided cleanup callback */
ev->ev_cb(ev->ev_nvl, ev->ev_detector);
list_destroy(&ev->ev_ze_list);
kmem_free(ev, sizeof (zevent_t));
}
static void
zfs_zevent_drain(zevent_t *ev)
{
zfs_zevent_t *ze;
ASSERT(MUTEX_HELD(&zevent_lock));
list_remove(&zevent_list, ev);
/* Remove references to this event in all private file data */
while ((ze = list_head(&ev->ev_ze_list)) != NULL) {
list_remove(&ev->ev_ze_list, ze);
ze->ze_zevent = NULL;
ze->ze_dropped++;
}
zfs_zevent_free(ev);
}
void
zfs_zevent_drain_all(int *count)
{
zevent_t *ev;
mutex_enter(&zevent_lock);
while ((ev = list_head(&zevent_list)) != NULL)
zfs_zevent_drain(ev);
*count = zevent_len_cur;
zevent_len_cur = 0;
mutex_exit(&zevent_lock);
}
/*
* New zevents are inserted at the head. If the maximum queue
* length is exceeded a zevent will be drained from the tail.
* As part of this any user space processes which currently have
* a reference to this zevent_t in their private data will have
* this reference set to NULL.
*/
static void
zfs_zevent_insert(zevent_t *ev)
{
ASSERT(MUTEX_HELD(&zevent_lock));
list_insert_head(&zevent_list, ev);
if (zevent_len_cur >= zfs_zevent_len_max)
zfs_zevent_drain(list_tail(&zevent_list));
else
zevent_len_cur++;
}
/*
* Post a zevent. The cb will be called when nvl and detector are no longer
* needed, i.e.:
* - An error happened and a zevent can't be posted. In this case, cb is called
* before zfs_zevent_post() returns.
* - The event is being drained and freed.
*/
int
zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb)
{
inode_timespec_t tv;
int64_t tv_array[2];
uint64_t eid;
size_t nvl_size = 0;
zevent_t *ev;
int error;
ASSERT(cb != NULL);
gethrestime(&tv);
tv_array[0] = tv.tv_sec;
tv_array[1] = tv.tv_nsec;
error = nvlist_add_int64_array(nvl, FM_EREPORT_TIME, tv_array, 2);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
goto out;
}
eid = atomic_inc_64_nv(&zevent_eid);
error = nvlist_add_uint64(nvl, FM_EREPORT_EID, eid);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
goto out;
}
error = nvlist_size(nvl, &nvl_size, NV_ENCODE_NATIVE);
if (error) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
goto out;
}
if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
error = EOVERFLOW;
goto out;
}
ev = zfs_zevent_alloc();
if (ev == NULL) {
atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
error = ENOMEM;
goto out;
}
ev->ev_nvl = nvl;
ev->ev_detector = detector;
ev->ev_cb = cb;
ev->ev_eid = eid;
mutex_enter(&zevent_lock);
zfs_zevent_insert(ev);
cv_broadcast(&zevent_cv);
mutex_exit(&zevent_lock);
out:
if (error)
cb(nvl, detector);
return (error);
}
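/*
 * Illustrative sketch (not part of the upstream file): a minimal cleanup
 * callback matching the contract above. The callback owns both nvlists
 * once zfs_zevent_post() has been called, whether the post succeeded or
 * not. The function name is hypothetical.
 */
static void
zevent_post_cb_example(nvlist_t *nvl, nvlist_t *detector)
{
	if (nvl != NULL)
		fm_nvlist_destroy(nvl, FM_NVA_FREE);
	if (detector != NULL)
		fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/* A caller would then post with: (void) zfs_zevent_post(nvl, detector, zevent_post_cb_example); */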
void
zfs_zevent_track_duplicate(void)
{
atomic_inc_64(&erpt_kstat_data.erpt_duplicates.value.ui64);
}
static int
zfs_zevent_minor_to_state(minor_t minor, zfs_zevent_t **ze)
{
*ze = zfsdev_get_state(minor, ZST_ZEVENT);
if (*ze == NULL)
return (SET_ERROR(EBADF));
return (0);
}
zfs_file_t *
zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze)
{
zfs_file_t *fp = zfs_file_get(fd);
if (fp == NULL)
return (NULL);
int error = zfsdev_getminor(fp, minorp);
if (error == 0)
error = zfs_zevent_minor_to_state(*minorp, ze);
if (error) {
zfs_zevent_fd_rele(fp);
fp = NULL;
}
return (fp);
}
void
zfs_zevent_fd_rele(zfs_file_t *fp)
{
zfs_file_put(fp);
}
/*
* Get the next zevent in the stream and place a copy in 'event'. This
* may fail with ENOMEM if the encoded nvlist size exceeds the passed
* 'event_size'. In this case the stream pointer is not advanced and
* and 'event_size' is set to the minimum required buffer size.
*/
int
zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
uint64_t *dropped)
{
zevent_t *ev;
size_t size;
int error = 0;
mutex_enter(&zevent_lock);
if (ze->ze_zevent == NULL) {
/* New stream starts at the beginning/tail */
ev = list_tail(&zevent_list);
if (ev == NULL) {
error = ENOENT;
goto out;
}
} else {
/*
 * An existing stream continues with the next element; remove
 * ourselves from the wait queue for the previous element.
*/
ev = list_prev(&zevent_list, ze->ze_zevent);
if (ev == NULL) {
error = ENOENT;
goto out;
}
}
VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
if (size > *event_size) {
*event_size = size;
error = ENOMEM;
goto out;
}
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
(void) nvlist_dup(ev->ev_nvl, event, KM_SLEEP);
*dropped = ze->ze_dropped;
#ifdef _KERNEL
/* Include events dropped due to rate limiting */
*dropped += atomic_swap_64(&ratelimit_dropped, 0);
#endif
ze->ze_dropped = 0;
out:
mutex_exit(&zevent_lock);
return (error);
}
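/*
 * Illustrative consumer sketch (not part of the upstream file): retrying
 * with the size reported back through *event_size when zfs_zevent_next()
 * returns ENOMEM, as described above. Names and the initial limit are
 * hypothetical.
 */
static int
zevent_next_example(zfs_zevent_t *ze, nvlist_t **nvlp, uint64_t *droppedp)
{
	uint64_t size = 1024;	/* arbitrary initial limit */
	int error;

	do {
		/* On ENOMEM, size now holds the minimum required value. */
		error = zfs_zevent_next(ze, nvlp, &size, droppedp);
	} while (error == ENOMEM);
	return (error);
}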
/*
* Wait in an interruptible state for any new events.
*/
int
zfs_zevent_wait(zfs_zevent_t *ze)
{
int error = EAGAIN;
mutex_enter(&zevent_lock);
zevent_waiters++;
while (error == EAGAIN) {
if (zevent_flags & ZEVENT_SHUTDOWN) {
error = SET_ERROR(ESHUTDOWN);
break;
}
error = cv_wait_sig(&zevent_cv, &zevent_lock);
if (signal_pending(current)) {
error = SET_ERROR(EINTR);
break;
} else if (!list_is_empty(&zevent_list)) {
error = 0;
continue;
} else {
error = EAGAIN;
}
}
zevent_waiters--;
mutex_exit(&zevent_lock);
return (error);
}
/*
* The caller may seek to a specific EID by passing that EID. If the EID
* is still available in the posted list of events the cursor is positioned
* there. Otherwise ENOENT is returned and the cursor is not moved.
*
* There are two reserved EIDs which may be passed and will never fail.
* ZEVENT_SEEK_START positions the cursor at the start of the list, and
* ZEVENT_SEEK_END positions the cursor at the end of the list.
*/
int
zfs_zevent_seek(zfs_zevent_t *ze, uint64_t eid)
{
zevent_t *ev;
int error = 0;
mutex_enter(&zevent_lock);
if (eid == ZEVENT_SEEK_START) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = NULL;
goto out;
}
if (eid == ZEVENT_SEEK_END) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ev = list_head(&zevent_list);
if (ev) {
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
} else {
ze->ze_zevent = NULL;
}
goto out;
}
for (ev = list_tail(&zevent_list); ev != NULL;
ev = list_prev(&zevent_list, ev)) {
if (ev->ev_eid == eid) {
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
ze->ze_zevent = ev;
list_insert_head(&ev->ev_ze_list, ze);
break;
}
}
if (ev == NULL)
error = ENOENT;
out:
mutex_exit(&zevent_lock);
return (error);
}
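/*
 * Illustrative sketch (not part of the upstream file): rewinding a stream
 * with the reserved ZEVENT_SEEK_START EID, which the comment above
 * guarantees cannot fail. The function name is hypothetical.
 */
static void
zevent_rewind_example(zfs_zevent_t *ze)
{
	VERIFY0(zfs_zevent_seek(ze, ZEVENT_SEEK_START));
	/* Subsequent zfs_zevent_next() calls start from the oldest event. */
}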
void
zfs_zevent_init(zfs_zevent_t **zep)
{
zfs_zevent_t *ze;
ze = *zep = kmem_zalloc(sizeof (zfs_zevent_t), KM_SLEEP);
list_link_init(&ze->ze_node);
}
void
zfs_zevent_destroy(zfs_zevent_t *ze)
{
mutex_enter(&zevent_lock);
if (ze->ze_zevent)
list_remove(&ze->ze_zevent->ev_ze_list, ze);
mutex_exit(&zevent_lock);
kmem_free(ze, sizeof (zfs_zevent_t));
}
#endif /* _KERNEL */
/*
* Wrappers for FM nvlist allocators
*/
-/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
+ (void) nva;
return (kmem_zalloc(size, KM_SLEEP));
}
-/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
+ (void) nva;
kmem_free(buf, size);
}
const nv_alloc_ops_t fm_mem_alloc_ops = {
.nv_ao_init = NULL,
.nv_ao_fini = NULL,
.nv_ao_alloc = i_fm_alloc,
.nv_ao_free = i_fm_free,
.nv_ao_reset = NULL
};
/*
* Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
* to the newly allocated nv_alloc_t structure is returned upon success or NULL
* is returned to indicate that the nv_alloc structure could not be created.
*/
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
kmem_free(nvhdl, sizeof (nv_alloc_t));
return (NULL);
}
return (nvhdl);
}
/*
* Destroy a previously allocated nv_alloc structure. The fixed buffer
* associated with nva must be freed by the caller.
*/
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
nv_alloc_fini(nva);
kmem_free(nva, sizeof (nv_alloc_t));
}
/*
* Create a new nv list. A pointer to a new nv list structure is returned
* upon success or NULL is returned to indicate that the structure could
 * not be created. The newly created nv list is managed by the
* operations installed in nva. If nva is NULL, the default FMA nva
* operations are installed and used.
*
* When called from the kernel and nva == NULL, this function must be called
* from passive kernel context with no locks held that can prevent a
* sleeping memory allocation from occurring. Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
*/
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
int hdl_alloced = 0;
nvlist_t *nvl;
nv_alloc_t *nvhdl;
if (nva == NULL) {
nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
kmem_free(nvhdl, sizeof (nv_alloc_t));
return (NULL);
}
hdl_alloced = 1;
} else {
nvhdl = nva;
}
if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
if (hdl_alloced) {
nv_alloc_fini(nvhdl);
kmem_free(nvhdl, sizeof (nv_alloc_t));
}
return (NULL);
}
return (nvl);
}
/*
 * Destroy a previously allocated nvlist structure. flag indicates whether
 * the associated nva structure should be freed (FM_NVA_FREE) or
* retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
* it to be re-used for future nvlist creation operations.
*/
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
nvlist_free(nvl);
if (nva != NULL) {
if (flag == FM_NVA_FREE)
fm_nva_xdestroy(nva);
}
}
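/*
 * Illustrative sketch (not part of the upstream file): building an nvlist
 * in a caller-owned fixed buffer and tearing it down again. The buffer
 * size and names are arbitrary assumptions.
 */
static void
fm_fixed_buf_example(void)
{
	static char buf[1024];
	nv_alloc_t *nva;
	nvlist_t *nvl;

	nva = fm_nva_xcreate(buf, sizeof (buf));
	if (nva == NULL)
		return;
	nvl = fm_nvlist_create(nva);
	if (nvl != NULL) {
		/* ... populate nvl, e.g. via fm_payload_set() ... */
		fm_nvlist_destroy(nvl, FM_NVA_RETAIN);
	}
	fm_nva_xdestroy(nva);	/* buf itself remains owned by the caller */
}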
int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
int nelem, ret = 0;
data_type_t type;
while (ret == 0 && name != NULL) {
type = va_arg(ap, data_type_t);
switch (type) {
case DATA_TYPE_BYTE:
ret = nvlist_add_byte(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_BYTE_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_byte_array(payload, name,
va_arg(ap, uchar_t *), nelem);
break;
case DATA_TYPE_BOOLEAN_VALUE:
ret = nvlist_add_boolean_value(payload, name,
va_arg(ap, boolean_t));
break;
case DATA_TYPE_BOOLEAN_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_boolean_array(payload, name,
va_arg(ap, boolean_t *), nelem);
break;
case DATA_TYPE_INT8:
ret = nvlist_add_int8(payload, name,
va_arg(ap, int));
break;
case DATA_TYPE_INT8_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int8_array(payload, name,
va_arg(ap, int8_t *), nelem);
break;
case DATA_TYPE_UINT8:
ret = nvlist_add_uint8(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_UINT8_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint8_array(payload, name,
va_arg(ap, uint8_t *), nelem);
break;
case DATA_TYPE_INT16:
ret = nvlist_add_int16(payload, name,
va_arg(ap, int));
break;
case DATA_TYPE_INT16_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int16_array(payload, name,
va_arg(ap, int16_t *), nelem);
break;
case DATA_TYPE_UINT16:
ret = nvlist_add_uint16(payload, name,
va_arg(ap, uint_t));
break;
case DATA_TYPE_UINT16_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint16_array(payload, name,
va_arg(ap, uint16_t *), nelem);
break;
case DATA_TYPE_INT32:
ret = nvlist_add_int32(payload, name,
va_arg(ap, int32_t));
break;
case DATA_TYPE_INT32_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int32_array(payload, name,
va_arg(ap, int32_t *), nelem);
break;
case DATA_TYPE_UINT32:
ret = nvlist_add_uint32(payload, name,
va_arg(ap, uint32_t));
break;
case DATA_TYPE_UINT32_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint32_array(payload, name,
va_arg(ap, uint32_t *), nelem);
break;
case DATA_TYPE_INT64:
ret = nvlist_add_int64(payload, name,
va_arg(ap, int64_t));
break;
case DATA_TYPE_INT64_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_int64_array(payload, name,
va_arg(ap, int64_t *), nelem);
break;
case DATA_TYPE_UINT64:
ret = nvlist_add_uint64(payload, name,
va_arg(ap, uint64_t));
break;
case DATA_TYPE_UINT64_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_uint64_array(payload, name,
va_arg(ap, uint64_t *), nelem);
break;
case DATA_TYPE_STRING:
ret = nvlist_add_string(payload, name,
va_arg(ap, char *));
break;
case DATA_TYPE_STRING_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_string_array(payload, name,
va_arg(ap, char **), nelem);
break;
case DATA_TYPE_NVLIST:
ret = nvlist_add_nvlist(payload, name,
va_arg(ap, nvlist_t *));
break;
case DATA_TYPE_NVLIST_ARRAY:
nelem = va_arg(ap, int);
ret = nvlist_add_nvlist_array(payload, name,
va_arg(ap, nvlist_t **), nelem);
break;
default:
ret = EINVAL;
}
name = va_arg(ap, char *);
}
return (ret);
}
void
fm_payload_set(nvlist_t *payload, ...)
{
int ret;
const char *name;
va_list ap;
va_start(ap, payload);
name = va_arg(ap, char *);
ret = i_fm_payload_set(payload, name, ap);
va_end(ap);
if (ret)
atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
/*
* Set-up and validate the members of an ereport event according to:
*
* Member name Type Value
* ====================================================
* class string ereport
* version uint8_t 0
* ena uint64_t <ena>
* detector nvlist_t <detector>
* ereport-payload nvlist_t <var args>
*
* We don't actually add a 'version' member to the payload. Really,
* the version quoted to us by our caller is that of the category 1
* "ereport" event class (and we require FM_EREPORT_VERS0) but
* the payload version of the actual leaf class event under construction
* may be something else. Callers should supply a version in the varargs,
* or (better) we could take two version arguments - one for the
* ereport category 1 classification (expect FM_EREPORT_VERS0) and one
* for the leaf class.
*/
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
uint64_t ena, const nvlist_t *detector, ...)
{
char ereport_class[FM_MAX_CLASS];
const char *name;
va_list ap;
int ret;
if (version != FM_EREPORT_VERS0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
FM_EREPORT_CLASS, erpt_class);
if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
(nvlist_t *)detector) != 0) {
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
va_start(ap, detector);
name = va_arg(ap, const char *);
ret = i_fm_payload_set(ereport, name, ap);
va_end(ap);
if (ret)
atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
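/*
 * Illustrative sketch (not part of the upstream file): an ereport with a
 * small varargs payload terminated by NULL, as the loop in
 * i_fm_payload_set() expects. The class string and member names are made
 * up for illustration.
 */
static void
fm_ereport_example(nvlist_t *ereport, uint64_t ena, const nvlist_t *detector)
{
	fm_ereport_set(ereport, FM_EREPORT_VERS0, "example.class", ena,
	    detector,
	    "example-counter", DATA_TYPE_UINT64, (uint64_t)42,
	    "example-flag", DATA_TYPE_BOOLEAN_VALUE, B_TRUE,
	    NULL);
}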
/*
 * Set-up and validate the members of an hc fmri according to:
*
* Member name Type Value
* ===================================================
* version uint8_t 0
* auth nvlist_t <auth>
* hc-name string <name>
* hc-id string <id>
*
* Note that auth and hc-id are optional members.
*/
#define HC_MAXPAIRS 20
#define HC_MAXNAMELEN 50
static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
if (version != FM_HC_SCHEME_VERSION) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return (0);
}
return (1);
}
void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
nvlist_t *snvl, int npairs, ...)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
nvlist_t *pairs[HC_MAXPAIRS];
va_list ap;
int i;
if (!fm_fmri_hc_set_common(fmri, version, auth))
return;
npairs = MIN(npairs, HC_MAXPAIRS);
va_start(ap, npairs);
for (i = 0; i < npairs; i++) {
const char *name = va_arg(ap, const char *);
uint32_t id = va_arg(ap, uint32_t);
char idstr[11];
(void) snprintf(idstr, sizeof (idstr), "%u", id);
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
va_end(ap);
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
for (i = 0; i < npairs; i++)
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
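/*
 * Illustrative sketch (not part of the upstream file): an hc FMRI with
 * two hc-name/hc-id pairs; each pair is a string name followed by a
 * uint32_t id, as consumed by the va_arg loop above. The component names
 * are made up.
 */
static void
fm_fmri_hc_example(nvlist_t *fmri, const nvlist_t *auth)
{
	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 2,
	    "motherboard", (uint32_t)0,
	    "cpu", (uint32_t)1);
}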
void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
nvlist_t *pairs[HC_MAXPAIRS];
nvlist_t **hcl;
uint_t n;
int i, j;
va_list ap;
char *hcname, *hcid;
if (!fm_fmri_hc_set_common(fmri, version, auth))
return;
/*
* copy the bboard nvpairs to the pairs array
*/
if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
!= 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < n; i++) {
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
&hcname) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
for (j = 0; j <= i; j++) {
if (pairs[j] != NULL)
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
/*
* create the pairs from passed in pairs
*/
npairs = MIN(npairs, HC_MAXPAIRS);
va_start(ap, npairs);
for (i = n; i < npairs + n; i++) {
const char *name = va_arg(ap, const char *);
uint32_t id = va_arg(ap, uint32_t);
char idstr[11];
(void) snprintf(idstr, sizeof (idstr), "%u", id);
pairs[i] = fm_nvlist_create(nva);
if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
for (j = 0; j <= i; j++) {
if (pairs[j] != NULL)
fm_nvlist_destroy(pairs[j],
FM_NVA_RETAIN);
}
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
va_end(ap);
/*
* Create the fmri hc list
*/
if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
npairs + n) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
for (i = 0; i < npairs + n; i++) {
fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
}
if (snvl != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
}
}
/*
 * Set-up and validate the members of a dev fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth>
* devpath string <devpath>
* [devid] string <devid>
* [target-port-l0id] string <target-port-lun0-id>
*
* Note that auth and devid are optional members.
*/
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
const char *devpath, const char *devid, const char *tpl0)
{
int err = 0;
if (version != DEV_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
if (auth != NULL) {
err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
(nvlist_t *)auth);
}
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
if (devid != NULL)
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
if (tpl0 != NULL)
err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
if (err)
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
/*
 * Set-up and validate the members of a cpu fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth>
* cpuid uint32_t <cpu_id>
* cpumask uint8_t <cpu_mask>
* serial uint64_t <serial_id>
*
* Note that auth, cpumask, serial are optional members.
*
*/
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
if (version < CPU_SCHEME_VERSION1) {
atomic_inc_64(failedp);
return;
}
if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
atomic_inc_64(failedp);
return;
}
if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
FM_FMRI_SCHEME_CPU) != 0) {
atomic_inc_64(failedp);
return;
}
if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0)
atomic_inc_64(failedp);
if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
atomic_inc_64(failedp);
if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
*cpu_maskp) != 0)
atomic_inc_64(failedp);
if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
atomic_inc_64(failedp);
}
/*
 * Set-up and validate the members of a mem fmri according to:
*
* Member name Type Value
* ====================================================
* version uint8_t 0
* auth nvlist_t <auth> [optional]
* unum string <unum>
* serial string <serial> [optional*]
* offset uint64_t <offset> [optional]
*
* * serial is required if offset is present
*/
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
const char *unum, const char *serial, uint64_t offset)
{
if (version != MEM_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (!serial && (offset != (uint64_t)-1)) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (auth != NULL) {
if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
(nvlist_t *)auth) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (serial != NULL) {
if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
(char **)&serial, 1) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
FM_FMRI_MEM_OFFSET, offset) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
uint64_t vdev_guid)
{
if (version != ZFS_SCHEME_VERSION0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
return;
}
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
if (vdev_guid != 0) {
if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
atomic_inc_64(
&erpt_kstat_data.fmri_set_failed.value.ui64);
}
}
}
uint64_t
fm_ena_increment(uint64_t ena)
{
uint64_t new_ena;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
break;
case FM_ENA_FMT2:
new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
break;
default:
new_ena = 0;
}
return (new_ena);
}
uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
uint64_t ena = 0;
switch (format) {
case FM_ENA_FMT1:
if (timestamp) {
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((cpuid << ENA_FMT1_CPUID_SHFT) &
ENA_FMT1_CPUID_MASK) |
((timestamp << ENA_FMT1_TIME_SHFT) &
ENA_FMT1_TIME_MASK));
} else {
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((cpuid << ENA_FMT1_CPUID_SHFT) &
ENA_FMT1_CPUID_MASK) |
((gethrtime() << ENA_FMT1_TIME_SHFT) &
ENA_FMT1_TIME_MASK));
}
break;
case FM_ENA_FMT2:
ena = (uint64_t)((format & ENA_FORMAT_MASK) |
((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
break;
default:
break;
}
return (ena);
}
uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
uint64_t ena;
kpreempt_disable();
ena = fm_ena_generate_cpu(timestamp, getcpuid(), format);
kpreempt_enable();
return (ena);
}
uint64_t
fm_ena_generation_get(uint64_t ena)
{
uint64_t gen;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
break;
case FM_ENA_FMT2:
gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
break;
default:
gen = 0;
break;
}
return (gen);
}
uchar_t
fm_ena_format_get(uint64_t ena)
{
return (ENA_FORMAT(ena));
}
uint64_t
fm_ena_id_get(uint64_t ena)
{
uint64_t id;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
break;
case FM_ENA_FMT2:
id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
break;
default:
id = 0;
}
return (id);
}
uint64_t
fm_ena_time_get(uint64_t ena)
{
uint64_t time;
switch (ENA_FORMAT(ena)) {
case FM_ENA_FMT1:
time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
break;
case FM_ENA_FMT2:
time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
break;
default:
time = 0;
}
return (time);
}
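/*
 * Illustrative sketch (not part of the upstream file): generating a
 * format-1 ENA and pulling its fields back out with the accessors above.
 * The function name is hypothetical.
 */
static void
fm_ena_example(void)
{
	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);

	ASSERT3U(fm_ena_format_get(ena), ==, FM_ENA_FMT1);
	(void) fm_ena_time_get(ena);		/* timestamp bits */
	(void) fm_ena_id_get(ena);		/* cpuid bits for format 1 */
	ena = fm_ena_increment(ena);		/* bump the generation field */
	(void) fm_ena_generation_get(ena);
}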
#ifdef _KERNEL
/*
* Helper function to increment ereport dropped count. Used by the event
* rate limiting code to give feedback to the user about how many events were
* rate limited by including them in the 'dropped' count.
*/
void
fm_erpt_dropped_increment(void)
{
atomic_inc_64(&ratelimit_dropped);
}
void
fm_init(void)
{
zevent_len_cur = 0;
zevent_flags = 0;
/* Initialize zevent allocation and generation kstats */
fm_ksp = kstat_create("zfs", 0, "fm", "misc", KSTAT_TYPE_NAMED,
sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (fm_ksp != NULL) {
fm_ksp->ks_data = &erpt_kstat_data;
kstat_install(fm_ksp);
} else {
cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
}
mutex_init(&zevent_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zevent_list, sizeof (zevent_t),
offsetof(zevent_t, ev_node));
cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL);
zfs_ereport_init();
}
void
fm_fini(void)
{
int count;
zfs_ereport_fini();
zfs_zevent_drain_all(&count);
mutex_enter(&zevent_lock);
cv_broadcast(&zevent_cv);
zevent_flags |= ZEVENT_SHUTDOWN;
while (zevent_waiters > 0) {
mutex_exit(&zevent_lock);
schedule();
mutex_enter(&zevent_lock);
}
mutex_exit(&zevent_lock);
cv_destroy(&zevent_cv);
list_destroy(&zevent_list);
mutex_destroy(&zevent_lock);
if (fm_ksp != NULL) {
kstat_delete(fm_ksp);
fm_ksp = NULL;
}
}
#endif /* _KERNEL */
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, INT, ZMOD_RW,
"Max event queue length");
diff --git a/sys/contrib/openzfs/module/zfs/gzip.c b/sys/contrib/openzfs/module/zfs/gzip.c
index e2c6e59969d6..48191241bd7d 100644
--- a/sys/contrib/openzfs/module/zfs/gzip.c
+++ b/sys/contrib/openzfs/module/zfs/gzip.c
@@ -1,106 +1,106 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/strings.h>
#include <sys/qat.h>
#include <sys/zio_compress.h>
#ifdef _KERNEL
#include <sys/zmod.h>
typedef size_t zlen_t;
#define compress_func z_compress_level
#define uncompress_func z_uncompress
#else /* _KERNEL */
#include <zlib.h>
typedef uLongf zlen_t;
#define compress_func compress2
#define uncompress_func uncompress
#endif
size_t
gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
int ret;
zlen_t dstlen = d_len;
ASSERT(d_len <= s_len);
/* check if hardware accelerator can be used */
if (qat_dc_use_accel(s_len)) {
ret = qat_compress(QAT_COMPRESS, s_start, s_len, d_start,
d_len, &dstlen);
if (ret == CPA_STATUS_SUCCESS) {
return ((size_t)dstlen);
} else if (ret == CPA_STATUS_INCOMPRESSIBLE) {
if (d_len != s_len)
return (s_len);
bcopy(s_start, d_start, s_len);
return (s_len);
}
/* if hardware compression fails, do it again with software */
}
if (compress_func(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
if (d_len != s_len)
return (s_len);
bcopy(s_start, d_start, s_len);
return (s_len);
}
return ((size_t)dstlen);
}
-/*ARGSUSED*/
int
gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
+ (void) n;
zlen_t dstlen = d_len;
ASSERT(d_len >= s_len);
/* check if hardware accelerator can be used */
if (qat_dc_use_accel(d_len)) {
if (qat_compress(QAT_DECOMPRESS, s_start, s_len,
d_start, d_len, &dstlen) == CPA_STATUS_SUCCESS)
return (0);
/* if hardware decompression fails, do it again with software */
}
if (uncompress_func(d_start, &dstlen, s_start, s_len) != Z_OK)
return (-1);
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/lz4.c b/sys/contrib/openzfs/module/zfs/lz4.c
index 9da9d9e00635..827ba2b662f5 100644
--- a/sys/contrib/openzfs/module/zfs/lz4.c
+++ b/sys/contrib/openzfs/module/zfs/lz4.c
@@ -1,1084 +1,1082 @@
/*
* LZ4 - Fast LZ compression algorithm
* Header File
* Copyright (C) 2011-2013, Yann Collet.
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
* - LZ4 source repository : http://code.google.com/p/lz4/
*/
#include <sys/zfs_context.h>
#include <sys/zio_compress.h>
static int real_LZ4_compress(const char *source, char *dest, int isize,
int osize);
static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
int isize, int maxOutputSize);
static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
int isize, int osize);
static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
int isize, int osize);
static void *lz4_alloc(int flags);
static void lz4_free(void *ctx);
-/*ARGSUSED*/
size_t
lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
size_t d_len, int n)
{
+ (void) n;
uint32_t bufsiz;
char *dest = d_start;
ASSERT(d_len >= sizeof (bufsiz));
bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
d_len - sizeof (bufsiz));
/* Signal an error if the compression routine returned zero. */
if (bufsiz == 0)
return (s_len);
/*
* The exact compressed size is needed by the decompression routine,
* so it is stored at the start of the buffer. Note that this may be
* less than the compressed block size, which is rounded up to a
* multiple of 1<<ashift.
*/
*(uint32_t *)dest = BE_32(bufsiz);
return (bufsiz + sizeof (bufsiz));
}
-/*ARGSUSED*/
int
lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
size_t d_len, int n)
{
+ (void) n;
const char *src = s_start;
uint32_t bufsiz = BE_IN32(src);
/* invalid compressed buffer size encoded at start */
if (bufsiz + sizeof (bufsiz) > s_len)
return (1);
/*
* Returns 0 on success (decompression function returned non-negative)
* and non-zero on failure (decompression function returned negative).
*/
return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
d_start, bufsiz, d_len) < 0);
}
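/*
 * Illustrative sketch (not part of the upstream file): a compress /
 * decompress round trip through the two wrappers above. Buffer handling
 * and names are arbitrary assumptions; real callers size d_len from the
 * block layout.
 */
static boolean_t
lz4_roundtrip_example(void *src, size_t len, void *scratch, void *out)
{
	/* Ask for at least one byte of savings; getting s_len back means "no". */
	size_t clen = lz4_compress_zfs(src, scratch, len, len - 1, 0);

	if (clen >= len)
		return (B_FALSE);	/* data was not compressible */
	/* lz4_decompress_zfs() returns 0 on success. */
	return (lz4_decompress_zfs(scratch, out, clen, len, 0) == 0);
}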
/*
* LZ4 API Description:
*
* Simple Functions:
* real_LZ4_compress() :
* isize : is the input size. Max supported value is ~1.9GB
* return : the number of bytes written in buffer dest
* or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
* note : destination buffer must be already allocated.
* destination buffer must be sized to handle worst cases
* situations (input data not compressible) worst case size
* evaluation is provided by function LZ4_compressBound().
*
* real_LZ4_uncompress() :
* osize : is the output size, therefore the original size
* return : the number of bytes read in the source buffer.
* If the source stream is malformed, the function will stop
* decoding and return a negative result, indicating the byte
* position of the faulty instruction. This function never
* writes beyond dest + osize, and is therefore protected
* against malicious data packets.
* note : destination buffer must be already allocated
* note : real_LZ4_uncompress() is not used in ZFS so its code
* is not present here.
*
* Advanced Functions
*
* LZ4_compressBound() :
* Provides the maximum size that LZ4 may output in a "worst case"
* scenario (input data not compressible) primarily useful for memory
* allocation of output buffer.
*
* isize : is the input size. Max supported value is ~1.9GB
* return : maximum output size in a "worst case" scenario
* note : this function is limited by "int" range (2^31-1)
*
* LZ4_uncompress_unknownOutputSize() :
* isize : is the input size, therefore the compressed size
* maxOutputSize : is the size of the destination buffer (which must be
* already allocated)
* return : the number of bytes decoded in the destination buffer
* (necessarily <= maxOutputSize). If the source stream is
* malformed, the function will stop decoding and return a
* negative result, indicating the byte position of the faulty
* instruction. This function never writes beyond dest +
* maxOutputSize, and is therefore protected against malicious
* data packets.
* note : Destination buffer must be already allocated.
* This version is slightly slower than real_LZ4_uncompress()
*
* LZ4_compressCtx() :
* This function explicitly handles the CTX memory structure.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
* by the caller (either on the stack or using kmem_cache_alloc). Passing
* NULL isn't valid.
*
* LZ4_compress64kCtx() :
* Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
* isize *Must* be <64KB, otherwise the output will be corrupted.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
* by the caller (either on the stack or using kmem_cache_alloc). Passing
* NULL isn't valid.
*/
/*
* Tuning parameters
*/
/*
* COMPRESSIONLEVEL: Increasing this value improves the compression ratio;
* lowering it reduces memory usage. Reduced memory usage
* typically improves speed, due to cache effects (e.g. 32KB L1 for Intel,
* 64KB L1 for AMD). Memory usage formula: N -> 2^(N+2) bytes
* (examples: 12 -> 16KB; 17 -> 512KB)
*/
#define COMPRESSIONLEVEL 12
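As a quick, illustrative check of the memory-usage formula above (2^N hash entries of 4 bytes each, hence 2^(N+2) bytes; the 4-byte entry size matches the U32 HTYPE used on 64-bit builds), the two examples in the comment can be restated as compile-time asserts. This is a sketch only, not part of the patch:

/* Illustrative only: 2^N entries x 4 bytes each = 2^(N+2) bytes. */
_Static_assert((1u << 12) * 4 == 16 * 1024, "COMPRESSIONLEVEL 12 -> 16KB");
_Static_assert((1u << 17) * 4 == 512 * 1024, "COMPRESSIONLEVEL 17 -> 512KB");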
/*
* NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value makes the
* algorithm skip faster over data segments considered "incompressible".
* This may decrease the compression ratio dramatically, but will be
* faster on incompressible data. Increasing this value makes the
* algorithm search longer before declaring a segment "incompressible".
* This can improve compression a bit, but will be slower on
* incompressible data. The default value (6) is recommended.
*/
#define NOTCOMPRESSIBLE_CONFIRMATION 6
/*
* BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This provides a performance boost
* on big-endian CPUs, but the resulting compressed stream will be
* incompatible with little-endian CPUs. Set this option to 1 only when
* the data will stay within a closed environment. This option is
* useless on little-endian CPUs (such as x86).
*/
/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
/*
* CPU Feature Detection
*/
/* 32 or 64 bits ? */
#if defined(_LP64)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
/*
* Little Endian or Big Endian?
* Note: overwrite the below #define if you know your architecture endianness.
*/
#if defined(_ZFS_BIG_ENDIAN)
#define LZ4_BIG_ENDIAN 1
#else
/*
* Little Endian assumed. PDP Endian and other very rare endian formats
* are unsupported.
*/
#undef LZ4_BIG_ENDIAN
#endif
/*
* Unaligned memory access is automatically enabled for "common" CPUs,
* such as x86. For other CPUs, the compiler will be more cautious and
* insert extra code to ensure aligned access is respected. If you know
* your target CPU supports unaligned memory access, you may want to
* force this option manually to improve performance.
*/
#if defined(__ARM_FEATURE_UNALIGNED)
#define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif
/*
* Illumos : we can't use GCC's __builtin_ctz family of builtins in the
* kernel
* Linux : we can use GCC's __builtin_ctz family of builtins in the
* kernel
*/
#undef LZ4_FORCE_SW_BITCOUNT
#if defined(__sparc)
#define LZ4_FORCE_SW_BITCOUNT
#endif
/*
* Compiler Options
*/
/* Disable restrict */
#define restrict
/*
* Linux : GCC_VERSION is defined as of 3.9-rc1, so undefine it.
* torvalds/linux@3f3f8d2f48acfd8ed3b8e6b7377935da57b27b16
*/
#ifdef GCC_VERSION
#undef GCC_VERSION
#endif
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#define expect(expr, value) (__builtin_expect((expr), (value)))
#else
#define expect(expr, value) (expr)
#endif
#ifndef likely
#define likely(expr) expect((expr) != 0, 1)
#endif
#ifndef unlikely
#define unlikely(expr) expect((expr) != 0, 0)
#endif
#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
(((x) & 0xffu) << 8)))
/* Basic types */
#define BYTE uint8_t
#define U16 uint16_t
#define U32 uint32_t
#define S32 int32_t
#define U64 uint64_t
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack(1)
#endif
typedef struct _U16_S {
U16 v;
} U16_S;
typedef struct _U32_S {
U32 v;
} U32_S;
typedef struct _U64_S {
U64 v;
} U64_S;
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack()
#endif
#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)
/*
* Constants
*/
#define MINMATCH 4
#define HASH_LOG COMPRESSIONLEVEL
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)
#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
NOTCOMPRESSIBLE_CONFIRMATION : 2)
#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH + MINMATCH)
#define MINLENGTH (MFLIMIT + 1)
#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
#define ML_BITS 4
#define ML_MASK ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
/*
* Architecture-specific macros
*/
#if LZ4_ARCH64
#define STEPSIZE 8
#define UARCH U64
#define AARCH A64
#define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
#define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
#define HTYPE U32
#define INITBASE(base) const BYTE* const base = ip
#else /* !LZ4_ARCH64 */
#define STEPSIZE 4
#define UARCH U32
#define AARCH A32
#define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
#define LZ4_SECURECOPY LZ4_WILDCOPY
#define HTYPE const BYTE *
#define INITBASE(base) const int base = 0
#endif /* !LZ4_ARCH64 */
#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
{ U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
{ U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
#else
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
#endif
/* Local structures */
struct refTables {
HTYPE hashTable[HASHTABLESIZE];
};
/* Macros */
#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
HASH_LOG))
#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
#define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
#define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
d = e; }
/* Private functions */
#if LZ4_ARCH64
static inline int
LZ4_NbCommonBytes(register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if ((defined(__GNUC__) && (GCC_VERSION >= 304)) || defined(__clang__)) && \
!defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_clzll(val) >> 3);
#else
int r;
if (!(val >> 32)) {
r = 4;
} else {
r = 0;
val >>= 32;
}
if (!(val >> 16)) {
r += 2;
val >>= 8;
} else {
val >>= 24;
}
r += (!val);
return (r);
#endif
#else
#if ((defined(__GNUC__) && (GCC_VERSION >= 304)) || defined(__clang__)) && \
!defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_ctzll(val) >> 3);
#else
static const int DeBruijnBytePos[64] =
{ 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
4, 5, 7, 2, 6, 5, 7, 6, 7, 7
};
return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
58];
#endif
#endif
}
#else
static inline int
LZ4_NbCommonBytes(register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if ((defined(__GNUC__) && (GCC_VERSION >= 304)) || defined(__clang__)) && \
!defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_clz(val) >> 3);
#else
int r;
if (!(val >> 16)) {
r = 2;
val >>= 8;
} else {
r = 0;
val >>= 24;
}
r += (!val);
return (r);
#endif
#else
#if defined(__GNUC__) && (GCC_VERSION >= 304) && \
!defined(LZ4_FORCE_SW_BITCOUNT)
return (__builtin_ctz(val) >> 3);
#else
static const int DeBruijnBytePos[32] = {
0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1
};
return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
27];
#endif
#endif
}
#endif
/* Compression functions */
-/*ARGSUSED*/
static int
LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
int osize)
{
struct refTables *srt = (struct refTables *)ctx;
HTYPE *HashTable = (HTYPE *) (srt->hashTable);
const BYTE *ip = (BYTE *) source;
INITBASE(base);
const BYTE *anchor = ip;
const BYTE *const iend = ip + isize;
const BYTE *const oend = (BYTE *) dest + osize;
const BYTE *const mflimit = iend - MFLIMIT;
#define matchlimit (iend - LASTLITERALS)
BYTE *op = (BYTE *) dest;
int len, length;
const int skipStrength = SKIPSTRENGTH;
U32 forwardH;
/* Init */
if (isize < MINLENGTH)
goto _last_literals;
/* First Byte */
HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
ip++;
forwardH = LZ4_HASH_VALUE(ip);
/* Main Loop */
for (;;) {
int findMatchAttempts = (1U << skipStrength) + 3;
const BYTE *forwardIp = ip;
const BYTE *ref;
BYTE *token;
/* Find a match */
do {
U32 h = forwardH;
int step = findMatchAttempts++ >> skipStrength;
ip = forwardIp;
forwardIp = ip + step;
if (unlikely(forwardIp > mflimit)) {
goto _last_literals;
}
forwardH = LZ4_HASH_VALUE(forwardIp);
ref = base + HashTable[h];
HashTable[h] = ip - base;
} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
/* Catch up */
while ((ip > anchor) && (ref > (BYTE *) source) &&
unlikely(ip[-1] == ref[-1])) {
ip--;
ref--;
}
/* Encode Literal length */
length = ip - anchor;
token = op++;
/* Check output limit */
if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
(length >> 8) > oend))
return (0);
if (length >= (int)RUN_MASK) {
*token = (RUN_MASK << ML_BITS);
len = length - RUN_MASK;
for (; len > 254; len -= 255)
*op++ = 255;
*op++ = (BYTE)len;
} else
*token = (length << ML_BITS);
/* Copy Literals */
LZ4_BLINDCOPY(anchor, op, length);
_next_match:
/* Encode Offset */
LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
/* Start Counting */
ip += MINMATCH;
ref += MINMATCH; /* MinMatch verified */
anchor = ip;
while (likely(ip < matchlimit - (STEPSIZE - 1))) {
UARCH diff = AARCH(ref) ^ AARCH(ip);
if (!diff) {
ip += STEPSIZE;
ref += STEPSIZE;
continue;
}
ip += LZ4_NbCommonBytes(diff);
goto _endCount;
}
#if LZ4_ARCH64
if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
ip += 4;
ref += 4;
}
#endif
if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
ip += 2;
ref += 2;
}
if ((ip < matchlimit) && (*ref == *ip))
ip++;
_endCount:
/* Encode MatchLength */
len = (ip - anchor);
/* Check output limit */
if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
return (0);
if (len >= (int)ML_MASK) {
*token += ML_MASK;
len -= ML_MASK;
for (; len > 509; len -= 510) {
*op++ = 255;
*op++ = 255;
}
if (len > 254) {
len -= 255;
*op++ = 255;
}
*op++ = (BYTE)len;
} else
*token += len;
/* Test end of chunk */
if (ip > mflimit) {
anchor = ip;
break;
}
/* Fill table */
HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;
/* Test next position */
ref = base + HashTable[LZ4_HASH_VALUE(ip)];
HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
token = op++;
*token = 0;
goto _next_match;
}
/* Prepare next loop */
anchor = ip++;
forwardH = LZ4_HASH_VALUE(ip);
}
_last_literals:
/* Encode Last Literals */
{
int lastRun = iend - anchor;
if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
oend)
return (0);
if (lastRun >= (int)RUN_MASK) {
*op++ = (RUN_MASK << ML_BITS);
lastRun -= RUN_MASK;
for (; lastRun > 254; lastRun -= 255) {
*op++ = 255;
}
*op++ = (BYTE)lastRun;
} else
*op++ = (lastRun << ML_BITS);
(void) memcpy(op, anchor, iend - anchor);
op += iend - anchor;
}
/* End */
return (int)(((char *)op) - dest);
}
/* Note : this function is valid only if isize < LZ4_64KLIMIT */
#define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
#define HASHLOG64K (HASH_LOG + 1)
#define HASH64KTABLESIZE (1U << HASHLOG64K)
#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
HASHLOG64K))
#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
-/*ARGSUSED*/
static int
LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
int osize)
{
struct refTables *srt = (struct refTables *)ctx;
U16 *HashTable = (U16 *) (srt->hashTable);
const BYTE *ip = (BYTE *) source;
const BYTE *anchor = ip;
const BYTE *const base = ip;
const BYTE *const iend = ip + isize;
const BYTE *const oend = (BYTE *) dest + osize;
const BYTE *const mflimit = iend - MFLIMIT;
#define matchlimit (iend - LASTLITERALS)
BYTE *op = (BYTE *) dest;
int len, length;
const int skipStrength = SKIPSTRENGTH;
U32 forwardH;
/* Init */
if (isize < MINLENGTH)
goto _last_literals;
/* First Byte */
ip++;
forwardH = LZ4_HASH64K_VALUE(ip);
/* Main Loop */
for (;;) {
int findMatchAttempts = (1U << skipStrength) + 3;
const BYTE *forwardIp = ip;
const BYTE *ref;
BYTE *token;
/* Find a match */
do {
U32 h = forwardH;
int step = findMatchAttempts++ >> skipStrength;
ip = forwardIp;
forwardIp = ip + step;
if (forwardIp > mflimit) {
goto _last_literals;
}
forwardH = LZ4_HASH64K_VALUE(forwardIp);
ref = base + HashTable[h];
HashTable[h] = ip - base;
} while (A32(ref) != A32(ip));
/* Catch up */
while ((ip > anchor) && (ref > (BYTE *) source) &&
(ip[-1] == ref[-1])) {
ip--;
ref--;
}
/* Encode Literal length */
length = ip - anchor;
token = op++;
/* Check output limit */
if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
(length >> 8) > oend))
return (0);
if (length >= (int)RUN_MASK) {
*token = (RUN_MASK << ML_BITS);
len = length - RUN_MASK;
for (; len > 254; len -= 255)
*op++ = 255;
*op++ = (BYTE)len;
} else
*token = (length << ML_BITS);
/* Copy Literals */
LZ4_BLINDCOPY(anchor, op, length);
_next_match:
/* Encode Offset */
LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
/* Start Counting */
ip += MINMATCH;
ref += MINMATCH; /* MinMatch verified */
anchor = ip;
while (ip < matchlimit - (STEPSIZE - 1)) {
UARCH diff = AARCH(ref) ^ AARCH(ip);
if (!diff) {
ip += STEPSIZE;
ref += STEPSIZE;
continue;
}
ip += LZ4_NbCommonBytes(diff);
goto _endCount;
}
#if LZ4_ARCH64
if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
ip += 4;
ref += 4;
}
#endif
if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
ip += 2;
ref += 2;
}
if ((ip < matchlimit) && (*ref == *ip))
ip++;
_endCount:
/* Encode MatchLength */
len = (ip - anchor);
/* Check output limit */
if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
return (0);
if (len >= (int)ML_MASK) {
*token += ML_MASK;
len -= ML_MASK;
for (; len > 509; len -= 510) {
*op++ = 255;
*op++ = 255;
}
if (len > 254) {
len -= 255;
*op++ = 255;
}
*op++ = (BYTE)len;
} else
*token += len;
/* Test end of chunk */
if (ip > mflimit) {
anchor = ip;
break;
}
/* Fill table */
HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;
/* Test next position */
ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
if (A32(ref) == A32(ip)) {
token = op++;
*token = 0;
goto _next_match;
}
/* Prepare next loop */
anchor = ip++;
forwardH = LZ4_HASH64K_VALUE(ip);
}
_last_literals:
/* Encode Last Literals */
{
int lastRun = iend - anchor;
if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
oend)
return (0);
if (lastRun >= (int)RUN_MASK) {
*op++ = (RUN_MASK << ML_BITS);
lastRun -= RUN_MASK;
for (; lastRun > 254; lastRun -= 255)
*op++ = 255;
*op++ = (BYTE)lastRun;
} else
*op++ = (lastRun << ML_BITS);
(void) memcpy(op, anchor, iend - anchor);
op += iend - anchor;
}
/* End */
return (int)(((char *)op) - dest);
}
static int
real_LZ4_compress(const char *source, char *dest, int isize, int osize)
{
void *ctx;
int result;
ctx = lz4_alloc(KM_SLEEP);
/*
* out of kernel memory, gently fall through - this will disable
* compression in zio_compress_data
*/
if (ctx == NULL)
return (0);
memset(ctx, 0, sizeof (struct refTables));
if (isize < LZ4_64KLIMIT)
result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
else
result = LZ4_compressCtx(ctx, source, dest, isize, osize);
lz4_free(ctx);
return (result);
}
/* Decompression functions */
/*
* Note: The decoding functions real_LZ4_uncompress() and
* LZ4_uncompress_unknownOutputSize() are safe against "buffer overflow"
* attack types. They will never write nor read outside of the provided
* output buffers. LZ4_uncompress_unknownOutputSize() also ensures that
* it will never read outside of the input buffer. A corrupted input
* will produce an error result, a negative int, indicating the position
* of the error within the input stream.
*
* Note[2]: real_LZ4_uncompress(), referred to above, is not used in ZFS so
* its code is not present here.
*/
static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
#endif
static int
LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
int maxOutputSize)
{
/* Local Variables */
const BYTE *restrict ip = (const BYTE *) source;
const BYTE *const iend = ip + isize;
const BYTE *ref;
BYTE *op = (BYTE *) dest;
BYTE *const oend = op + maxOutputSize;
BYTE *cpy;
/* Main Loop */
while (ip < iend) {
unsigned token;
size_t length;
/* get runlength */
token = *ip++;
if ((length = (token >> ML_BITS)) == RUN_MASK) {
int s = 255;
while ((ip < iend) && (s == 255)) {
s = *ip++;
if (unlikely(length > (size_t)(length + s)))
goto _output_error;
length += s;
}
}
/* copy literals */
cpy = op + length;
/* CORNER-CASE: cpy might overflow. */
if (cpy < op)
goto _output_error; /* cpy was overflowed, bail! */
if ((cpy > oend - COPYLENGTH) ||
(ip + length > iend - COPYLENGTH)) {
if (cpy > oend)
/* Error: writes beyond output buffer */
goto _output_error;
if (ip + length != iend)
/*
* Error: LZ4 format requires consuming all
* input at this stage
*/
goto _output_error;
(void) memcpy(op, ip, length);
op += length;
/* Necessarily EOF, due to parsing restrictions */
break;
}
LZ4_WILDCOPY(ip, op, cpy);
ip -= (op - cpy);
op = cpy;
/* get offset */
LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
ip += 2;
if (ref < (BYTE * const) dest)
/*
* Error: offset creates reference outside of
* destination buffer
*/
goto _output_error;
/* get matchlength */
if ((length = (token & ML_MASK)) == ML_MASK) {
while (ip < iend) {
int s = *ip++;
if (unlikely(length > (size_t)(length + s)))
goto _output_error;
length += s;
if (s == 255)
continue;
break;
}
}
/* copy repeated sequence */
if (unlikely(op - ref < STEPSIZE)) {
#if LZ4_ARCH64
int dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[0] = ref[0];
op[1] = ref[1];
op[2] = ref[2];
op[3] = ref[3];
op += 4;
ref += 4;
ref -= dec32table[op - ref];
A32(op) = A32(ref);
op += STEPSIZE - 4;
ref -= dec64;
} else {
LZ4_COPYSTEP(ref, op);
}
cpy = op + length - (STEPSIZE - 4);
if (cpy > oend - COPYLENGTH) {
if (cpy > oend)
/*
* Error: request to write outside of
* destination buffer
*/
goto _output_error;
#if LZ4_ARCH64
if ((ref + COPYLENGTH) > oend)
#else
if ((ref + COPYLENGTH) > oend ||
(op + COPYLENGTH) > oend)
#endif
goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
op = cpy;
if (op == oend)
/*
* Check EOF (should never happen, since
* last 5 bytes are supposed to be literals)
*/
goto _output_error;
continue;
}
LZ4_SECURECOPY(ref, op, cpy);
op = cpy; /* correction */
}
/* end of decoding */
return (int)(((char *)op) - dest);
/* write overflow error detected */
_output_error:
return (-1);
}
#ifdef __FreeBSD__
/*
* FreeBSD has 4, 8 and 16 KB malloc zones which can be used here.
* Should struct refTables get resized, this may need to be revisited;
* hence the compile-time asserts below.
*/
_Static_assert(sizeof(struct refTables) <= 16384,
"refTables too big for malloc");
_Static_assert((sizeof(struct refTables) % 4096) == 0,
"refTables not a multiple of page size");
#else
#define ZFS_LZ4_USE_CACHE
#endif
#ifdef ZFS_LZ4_USE_CACHE
static kmem_cache_t *lz4_cache;
void
lz4_init(void)
{
lz4_cache = kmem_cache_create("lz4_cache",
sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
lz4_fini(void)
{
if (lz4_cache) {
kmem_cache_destroy(lz4_cache);
lz4_cache = NULL;
}
}
static void *
lz4_alloc(int flags)
{
ASSERT(lz4_cache != NULL);
return (kmem_cache_alloc(lz4_cache, flags));
}
static void
lz4_free(void *ctx)
{
kmem_cache_free(lz4_cache, ctx);
}
#else
void
lz4_init(void)
{
}
void
lz4_fini(void)
{
}
static void *
lz4_alloc(int flags)
{
return (kmem_alloc(sizeof (struct refTables), flags));
}
static void
lz4_free(void *ctx)
{
kmem_free(ctx, sizeof (struct refTables));
}
#endif
diff --git a/sys/contrib/openzfs/module/zfs/lzjb.c b/sys/contrib/openzfs/module/zfs/lzjb.c
index a478e64c5141..1c536b110318 100644
--- a/sys/contrib/openzfs/module/zfs/lzjb.c
+++ b/sys/contrib/openzfs/module/zfs/lzjb.c
@@ -1,132 +1,132 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* We keep our own copy of this algorithm for 3 main reasons:
* 1. If we didn't, anyone modifying common/os/compress.c would
* directly break our on-disk format
* 2. Our version of lzjb does not have a number of checks that the
* common/os version needs and uses
* 3. We initialize the lempel table to ensure deterministic results,
* so that identical blocks can always be deduplicated.
* In particular, we are adding the "feature" that compress() can
* take a destination buffer size and return the compressed length, or the
* source length if compression would overflow the destination buffer.
*/
#include <sys/zfs_context.h>
#include <sys/zio_compress.h>
#define MATCH_BITS 6
#define MATCH_MIN 3
#define MATCH_MAX ((1 << MATCH_BITS) + (MATCH_MIN - 1))
#define OFFSET_MASK ((1 << (16 - MATCH_BITS)) - 1)
#define LEMPEL_SIZE 1024
-/*ARGSUSED*/
size_t
lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
+ (void) n;
uchar_t *src = s_start;
uchar_t *dst = d_start;
uchar_t *cpy;
uchar_t *copymap = NULL;
int copymask = 1 << (NBBY - 1);
int mlen, offset, hash;
uint16_t *hp;
uint16_t *lempel;
lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
while (src < (uchar_t *)s_start + s_len) {
if ((copymask <<= 1) == (1 << NBBY)) {
if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
kmem_free(lempel,
LEMPEL_SIZE*sizeof (uint16_t));
return (s_len);
}
copymask = 1;
copymap = dst;
*dst++ = 0;
}
if (src > (uchar_t *)s_start + s_len - MATCH_MAX) {
*dst++ = *src++;
continue;
}
hash = (src[0] << 16) + (src[1] << 8) + src[2];
hash += hash >> 9;
hash += hash >> 5;
hp = &lempel[hash & (LEMPEL_SIZE - 1)];
offset = (intptr_t)(src - *hp) & OFFSET_MASK;
*hp = (uint16_t)(uintptr_t)src;
cpy = src - offset;
if (cpy >= (uchar_t *)s_start && cpy != src &&
src[0] == cpy[0] && src[1] == cpy[1] && src[2] == cpy[2]) {
*copymap |= copymask;
for (mlen = MATCH_MIN; mlen < MATCH_MAX; mlen++)
if (src[mlen] != cpy[mlen])
break;
*dst++ = ((mlen - MATCH_MIN) << (NBBY - MATCH_BITS)) |
(offset >> NBBY);
*dst++ = (uchar_t)offset;
src += mlen;
} else {
*dst++ = *src++;
}
}
kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
return (dst - (uchar_t *)d_start);
}
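A minimal sketch of the compress() contract described in the header comment above: lzjb_compress() returns the number of bytes written, or s_len when the output would overflow the destination buffer, in which case the caller stores the block uncompressed. The wrapper name and the assumption that d_len >= s_len are hypothetical; the real kernel caller is not shown here.

#include <stddef.h>
#include <string.h>

static size_t
try_lzjb_compress(void *src, size_t s_len, void *dst, size_t d_len)
{
	/* The final argument is unused by lzjb (see the (void) n above). */
	size_t c_len = lzjb_compress(src, dst, s_len, d_len, 0);

	if (c_len >= s_len) {
		/* Did not shrink: fall back to storing the data as-is. */
		memcpy(dst, src, s_len);	/* assumes d_len >= s_len */
		return (s_len);
	}
	return (c_len);	/* compressed bytes actually written to dst */
}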
-/*ARGSUSED*/
int
lzjb_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
+ (void) s_len, (void) n;
uchar_t *src = s_start;
uchar_t *dst = d_start;
uchar_t *d_end = (uchar_t *)d_start + d_len;
uchar_t *cpy;
uchar_t copymap = 0;
int copymask = 1 << (NBBY - 1);
while (dst < d_end) {
if ((copymask <<= 1) == (1 << NBBY)) {
copymask = 1;
copymap = *src++;
}
if (copymap & copymask) {
int mlen = (src[0] >> (NBBY - MATCH_BITS)) + MATCH_MIN;
int offset = ((src[0] << NBBY) | src[1]) & OFFSET_MASK;
src += 2;
if ((cpy = dst - offset) < (uchar_t *)d_start)
return (-1);
while (--mlen >= 0 && dst < d_end)
*dst++ = *cpy++;
} else {
*dst++ = *src++;
}
}
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/metaslab.c b/sys/contrib/openzfs/module/zfs/metaslab.c
index df0d83327c0b..f367bea98267 100644
--- a/sys/contrib/openzfs/module/zfs/metaslab.c
+++ b/sys/contrib/openzfs/module/zfs/metaslab.c
@@ -1,6257 +1,6255 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>
#define WITH_DF_BLOCK_ALLOCATOR
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to a top-level vdev
* before moving on to the next one.
*/
unsigned long metaslab_aliquot = 512 << 10;
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
/*
* In pools where the log space map feature is not enabled we touch
* multiple metaslabs (and their respective space maps) with each
* transaction group. Thus, we benefit from having a small space map
* block size since it allows us to issue more I/O operations scattered
* around the disk. So a sane default for the space map block size
* is 8-16K.
*/
int zfs_metaslab_sm_blksz_no_log = (1 << 14);
/*
* When the log space map feature is enabled, we accumulate a lot of
* changes per metaslab that are flushed once in a while so we benefit
* from a bigger block size like 128K for the metaslab space maps.
*/
int zfs_metaslab_sm_blksz_with_log = (1 << 17);
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
int zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes more than zfs_metaslab_condense_block_threshold
* blocks.
*/
int zfs_metaslab_condense_block_threshold = 4;
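Taken together, the comments above describe a two-part condensing heuristic: the on-disk (uncondensed) space map must be at least zfs_condense_pct percent of the in-core (condensed) size, and it must span more than zfs_metaslab_condense_block_threshold blocks. The sketch below is a simplified, hypothetical restatement of that heuristic only; the actual decision is made by metaslab_should_condense(), which considers more state than shown here.

#include <stdbool.h>
#include <stdint.h>

static bool
should_condense_sketch(uint64_t ondisk_bytes, uint64_t incore_bytes,
    uint64_t sm_blksz)
{
	/* On-disk form must be at least zfs_condense_pct% of the in-core form. */
	if (ondisk_bytes < incore_bytes * zfs_condense_pct / 100)
		return (false);
	/* ... and the uncondensed map must use more than the block threshold. */
	return (ondisk_bytes >
	    (uint64_t)zfs_metaslab_condense_block_threshold * sm_blksz);
}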
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
* a metaslab group's free space is less than or equal to the
* zfs_mg_noalloc_threshold the allocator will avoid allocating to that
* group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
* Once all groups in the pool reach zfs_mg_noalloc_threshold then all
* groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
int zfs_mg_noalloc_threshold = 0;
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or
* equal to zfs_mg_fragmentation_threshold. If a metaslab group
* exceeds this threshold then it will be skipped unless all metaslab
* groups within the metaslab class have also crossed this threshold.
*
* This tunable was introduced to avoid edge cases where we continue
* allocating from very fragmented disks in our pool while other, less
* fragmented disks exist. On the other hand, if all disks in the
* pool are uniformly approaching the threshold, the threshold can
* become a performance speed bump, where we keep switching the disks
* that we allocate from (e.g. we allocate some segments from disk A,
* pushing it past the threshold, while freeing segments from disk
* B brings its fragmentation below the threshold).
*
* Empirically, we've seen that our vdev selection for allocations is
* good enough that fragmentation increases uniformly across all vdevs
* the majority of the time. Thus we set the threshold percentage high
* enough to avoid hitting the speed bump on pools that are being pushed
* to the edge.
*/
int zfs_mg_fragmentation_threshold = 95;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
* status allowing better metaslabs to be selected.
*/
int zfs_metaslab_fragmentation_threshold = 70;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = 0;
/*
* When set will prevent metaslabs from being unloaded.
*/
int metaslab_debug_unload = 0;
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size, it switches to a more
* aggressive strategy (i.e. search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
int metaslab_df_free_pct = 4;
/*
* Maximum distance to search forward from the last offset. Without this
* limit, fragmented pools can see >100,000 iterations and
* metaslab_block_picker() becomes the performance limiting factor on
* high-performance storage.
*
* With the default setting of 16MB, we typically see less than 500
* iterations, even with very fragmented, ashift=9 pools. The maximum number
* of iterations possible is:
* metaslab_df_max_search / (2 * (1<<ashift))
* With the default setting of 16MB this is 16*1024 (with ashift=9) or
* 2048 (with ashift=12).
*/
int metaslab_df_max_search = 16 * 1024 * 1024;
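For the arithmetic in the comment above, the worst-case iteration counts for the default 16MB search limit can be spelled out as compile-time asserts; this is illustrative only and not part of the patch:

/* metaslab_df_max_search / (2 * (1 << ashift)), for the default 16MB. */
_Static_assert((16 * 1024 * 1024) / (2 * (1 << 9)) == 16 * 1024,
    "ashift=9: at most 16384 iterations");
_Static_assert((16 * 1024 * 1024) / (2 * (1 << 12)) == 2048,
    "ashift=12: at most 2048 iterations");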
/*
* Forces the metaslab_block_picker function to search for at least this many
* segments forwards until giving up on finding a segment that the allocation
* will fit into.
*/
uint32_t metaslab_min_search_count = 100;
/*
* If we are not searching forward (due to metaslab_df_max_search,
* metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
* controls what segment is used. If it is set, we will use the largest free
* segment. If it is not set, we will use a segment of exactly the requested
* size (or larger).
*/
int metaslab_df_use_largest_segment = B_FALSE;
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
int metaslab_load_pct = 50;
/*
* These tunables control how long a metaslab will remain loaded after the
* last allocation from it. A metaslab can't be unloaded until at least
* metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
* have elapsed. However, zfs_metaslab_mem_limit may cause it to be
* unloaded sooner. These settings are intended to be generous -- to keep
* metaslabs loaded for a long time, reducing the rate of metaslab loading.
*/
int metaslab_unload_delay = 32;
int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
/*
* Max number of metaslabs per group to preload.
*/
int metaslab_preload_limit = 10;
/*
* Enable/disable preloading of metaslab.
*/
int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
int metaslab_bias_enabled = B_TRUE;
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
boolean_t zfs_remap_blkptr_enable = B_TRUE;
/*
* Enable/disable segment-based metaslab selection.
*/
int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
boolean_t metaslab_trace_enabled = B_FALSE;
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
* The limit should be sufficiently large that we don't expect any allocation
* to ever exceed this value. In debug mode, the system will panic if this
* limit is ever reached allowing for further investigation.
*/
uint64_t metaslab_trace_max_entries = 5000;
/*
* Maximum number of metaslabs per group that can be disabled
* simultaneously.
*/
int max_disabled_ms = 3;
/*
* Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
* To avoid 64-bit overflow, don't set above UINT32_MAX.
*/
unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */
/*
* Maximum percentage of memory to use on storing loaded metaslabs. If loading
* a metaslab would take it over this percentage, the oldest selected metaslab
* is automatically unloaded.
*/
int zfs_metaslab_mem_limit = 25;
/*
* Force the per-metaslab range trees to use 64-bit integers to store
* segments. Used for debugging purposes.
*/
boolean_t zfs_metaslab_force_large_segs = B_FALSE;
/*
* By default we only store segments over a certain size in the size-sorted
* metaslab trees (ms_allocatable_by_size and
* ms_unflushed_frees_by_size). This dramatically reduces memory usage and
* improves load and unload times at the cost of causing us to use slightly
* larger segments than we would otherwise in some cases.
*/
uint32_t metaslab_by_size_min_shift = 14;
/*
* If not set, we will first try normal allocation. If that fails then
* we will do a gang allocation. If that fails then we will do a "try hard"
* gang allocation. If that fails then we will have a multi-layer gang
* block.
*
* If set, we will first try normal allocation. If that fails then
* we will do a "try hard" allocation. If that fails we will do a gang
* allocation. If that fails we will do a "try hard" gang allocation. If
* that fails then we will have a multi-layer gang block.
*/
int zfs_metaslab_try_hard_before_gang = B_FALSE;
/*
* When not trying hard, we only consider the best zfs_metaslab_find_max_tries
* metaslabs. This improves performance, especially when there are many
* metaslabs per vdev and the allocation can't actually be satisfied (so we
* would otherwise iterate all the metaslabs). If there is a metaslab with a
* worse weight but it can actually satisfy the allocation, we won't find it
* until trying hard. This may happen if the worse metaslab is not loaded
* (and the true weight is better than we have calculated), or due to weight
* bucketization. E.g. we are looking for a 60K segment, and the best
* metaslabs all have free segments in the 32-63K bucket, but the best
* zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
* subsequent metaslab has ms_max_size >60KB (but fewer segments in this
* bucket, and therefore a lower weight).
*/
int zfs_metaslab_find_max_tries = 100;
static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;
typedef struct metaslab_stats {
kstat_named_t metaslabstat_trace_over_limit;
kstat_named_t metaslabstat_reload_tree;
kstat_named_t metaslabstat_too_many_tries;
kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;
static metaslab_stats_t metaslab_stats = {
{ "trace_over_limit", KSTAT_DATA_UINT64 },
{ "reload_tree", KSTAT_DATA_UINT64 },
{ "too_many_tries", KSTAT_DATA_UINT64 },
{ "try_hard", KSTAT_DATA_UINT64 },
};
#define METASLABSTAT_BUMP(stat) \
atomic_inc_64(&metaslab_stats.stat.value.ui64);
kstat_t *metaslab_ksp;
void
metaslab_stat_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
"misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (metaslab_ksp != NULL) {
metaslab_ksp->ks_data = &metaslab_stats;
kstat_install(metaslab_ksp);
}
}
void
metaslab_stat_fini(void)
{
if (metaslab_ksp != NULL) {
kstat_delete(metaslab_ksp);
metaslab_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
mca->mca_rotor = NULL;
zfs_refcount_create_tracked(&mca->mca_alloc_slots);
}
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
for (int i = 0; i < spa->spa_alloc_count; i++) {
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
ASSERT(mca->mca_rotor == NULL);
zfs_refcount_destroy(&mca->mca_alloc_slots);
}
mutex_destroy(&mc->mc_lock);
multilist_destroy(&mc->mc_metaslab_txg_list);
kmem_free(mc, offsetof(metaslab_class_t,
mc_allocator[spa->spa_alloc_count]));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
return (0);
}
static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
spa_t *spa = mc->mc_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
mutex_enter(&mc->mc_lock);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = vdev_get_mg(tvd, mc);
/*
* Skip any holes, uninitialized top-levels, or
* vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
}
mutex_exit(&mc->mc_lock);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels,
* or vdevs that are not in this metaslab class.
*/
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
multilist_t *ml = &mc->mc_metaslab_txg_list;
for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL) {
mutex_enter(&msp->ms_lock);
/*
* If the metaslab has been removed from the list
* (which could happen if we were at the memory limit
* and it was evicted during this loop), then we can't
* proceed and we should restart the sublist.
*/
if (!multilist_link_active(&msp->ms_class_txg_node)) {
mutex_exit(&msp->ms_lock);
i--;
break;
}
mls = multilist_sublist_lock(ml, i);
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
if (txg >
msp->ms_selected_txg + metaslab_unload_delay &&
gethrtime() > msp->ms_selected_time +
(uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
metaslab_evict(msp, txg);
} else {
/*
* Once we've hit a metaslab selected too
* recently to evict, we're done evicting for
* now.
*/
mutex_exit(&msp->ms_lock);
break;
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
}
}
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int sort1 = 0;
int sort2 = 0;
if (m1->ms_allocator != -1 && m1->ms_primary)
sort1 = 1;
else if (m1->ms_allocator != -1 && !m1->ms_primary)
sort1 = 2;
if (m2->ms_allocator != -1 && m2->ms_primary)
sort2 = 1;
else if (m2->ms_allocator != -1 && !m2->ms_primary)
sort2 = 2;
/*
* Sort inactive metaslabs first, then primaries, then secondaries. When
* selecting a metaslab to allocate from, an allocator first tries its
* primary, then secondary active metaslab. If it doesn't have active
* metaslabs, or can't allocate from them, it searches for an inactive
* metaslab to activate. If it can't find a suitable one, it will steal
* a primary or secondary metaslab from another allocator.
*/
if (sort1 < sort2)
return (-1);
if (sort1 > sort2)
return (1);
int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (TREE_CMP(m1->ms_start, m2->ms_start));
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
* The allocatable flag is set to true if the group's free capacity is
* above zfs_mg_noalloc_threshold and its fragmentation metric is either
* invalid or no greater than zfs_mg_fragmentation_threshold. If a
* metaslab group transitions from allocatable to non-allocatable or vice
* versa then the metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
* A metaslab group is considered allocatable if it has plenty
* of free space and is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
const metaslab_t *a = va;
const metaslab_t *b = vb;
int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
if (likely(cmp))
return (cmp);
uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
cmp = TREE_CMP(a_vdev_id, b_vdev_id);
if (cmp)
return (cmp);
return (TREE_CMP(a->ms_id, b->ms_id));
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
metaslab_group_t *mg;
mg = kmem_zalloc(offsetof(metaslab_group_t,
mg_allocator[allocators]), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;
for (int i = 0; i < allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
}
mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
taskq_destroy(mg->mg_taskq);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
mutex_destroy(&mg->mg_ms_disabled_lock);
cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
}
kmem_free(mg, offsetof(metaslab_group_t,
mg_allocator[mg->mg_allocators]));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
mc->mc_allocator[i].mca_rotor = mg;
mg = mg->mg_next;
}
}
/*
* Passivate a metaslab group and remove it from the allocation rotor.
* Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
* a metaslab group. This function will momentarily drop spa_config_locks
* that are lower than the SCL_ALLOC lock (see comment below).
*/
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
(SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT(mc->mc_allocator[i].mca_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
/*
* The spa_config_lock is an array of rwlocks, ordered as
* follows (from highest to lowest):
* SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
* SCL_ZIO > SCL_FREE > SCL_VDEV
* (For more information about the spa_config_lock see spa_misc.c)
* The higher the lock, the broader its coverage. When we passivate
* a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
* config locks. However, the metaslab group's taskq might be trying
* to preload metaslabs so we must drop the SCL_ZIO lock and any
* lower locks to allow the I/O to complete. At a minimum,
* we continue to hold the SCL_ALLOC lock, which prevents any future
* allocations from taking place and any changes to the vdev tree.
*/
spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(mg->mg_taskq, 0);
spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
for (int i = 0; i < mg->mg_allocators; i++) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
metaslab_t *msp = mga->mga_primary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
msp = mga->mga_secondary;
if (msp != NULL) {
mutex_enter(&msp->ms_lock);
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
mutex_exit(&msp->ms_lock);
}
}
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mgnext = NULL;
} else {
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
if (mc->mc_allocator[i].mca_rotor == mg)
mc->mc_allocator[i].mca_rotor = mgnext;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
/*
* Note that the number of nodes in mg_metaslab_tree may be one less
* than vdev_ms_count, due to the embedded log metaslab.
*/
mutex_enter(&mg->mg_lock);
uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
mutex_exit(&mg->mg_lock);
return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
avl_tree_t *t = &mg->mg_metaslab_tree;
uint64_t ashift = mg->mg_vd->vdev_ashift;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
mutex_enter(&mg->mg_lock);
for (metaslab_t *msp = avl_first(t);
msp != NULL; msp = AVL_NEXT(t, msp)) {
VERIFY3P(msp->ms_group, ==, mg);
/* skip if not active */
if (msp->ms_sm == NULL)
continue;
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
}
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
mutex_exit(&mg->mg_lock);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
mutex_enter(&mc->mc_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
IMPLY(mg == mg->mg_vd->vdev_log_mg,
mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(MUTEX_HELD(&mg->mg_lock));
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
/*
* Calculate the fragmentation for a given metaslab group. We can use
* a simple average here since all metaslabs within the group must have
* the same size. The return value will be a value between 0 and 100
* (inclusive), or ZFS_FRAG_INVALID if fewer than half of the metaslabs in
* this group have a fragmentation metric.
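*
* For example, if a group's valid metaslabs report fragmentation values of
* 10, 20, and 30, the group as a whole reports their simple average, 20.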
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
uint64_t valid_ms = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
continue;
if (msp->ms_group != mg)
continue;
valid_ms++;
fragmentation += msp->ms_fragmentation;
}
if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
return (ZFS_FRAG_INVALID);
fragmentation /= valid_ms;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
* zfs_mg_fragmentation_threshold and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
uint64_t psize, int allocator, int d)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
* We can only consider skipping this metaslab group if it's
* in the normal, special, or dedup metaslab class and there
* are other metaslab groups to select from. Otherwise, we
* always consider it eligible for allocations.
*/
if ((mc != spa_normal_class(spa) &&
mc != spa_special_class(spa) &&
mc != spa_dedup_class(spa)) ||
mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mga_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
int64_t qdepth;
uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
/*
* Relax allocation throttling for ditto blocks. Due to random
* imbalances in allocation, strict throttling tends to push all
* copies to the one vdev that looks slightly better at the moment.
*/
qmax = qmax * (4 + d) / 4;
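/*
* For example, for the third DVA of a ditto block (d == 2) the
* effective limit becomes qmax * 6 / 4, i.e. a 50% relaxation.
*/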
qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
* the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (metaslab_group_t *mgp = mg->mg_next;
mgp != rotor; mgp = mgp->mg_next) {
metaslab_group_allocator_t *mgap =
&mgp->mg_allocator[allocator];
qmax = mgap->mga_cur_max_alloc_queue_depth;
qmax = qmax * (4 + d) / 4;
qdepth =
zfs_refcount_count(&mgap->mga_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree using 32-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
if (likely(cmp))
return (cmp);
return (TREE_CMP(r1->rs_start, r2->rs_start));
}
/*
* Comparison function for the private size-ordered tree using 64-bit
* ranges. Tree is sorted by size, larger sizes at the end of the tree.
*/
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = TREE_CMP(rs_size1, rs_size2);
if (likely(cmp))
return (cmp);
return (TREE_CMP(r1->rs_start, r2->rs_start));
}
typedef struct metaslab_rt_arg {
zfs_btree_t *mra_bt;
uint32_t mra_floor_shift;
} metaslab_rt_arg_t;
struct mssa_arg {
range_tree_t *rt;
metaslab_rt_arg_t *mra;
};
static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
struct mssa_arg *mssap = arg;
range_tree_t *rt = mssap->rt;
metaslab_rt_arg_t *mrap = mssap->mra;
range_seg_max_t seg = {0};
rs_set_start(&seg, rt, start);
rs_set_end(&seg, rt, start + size);
metaslab_rt_add(rt, &seg, mrap);
}
static void
metaslab_size_tree_full_load(range_tree_t *rt)
{
metaslab_rt_arg_t *mrap = rt->rt_arg;
METASLABSTAT_BUMP(metaslabstat_reload_tree);
ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
mrap->mra_floor_shift = 0;
struct mssa_arg arg = {0};
arg.rt = rt;
arg.mra = mrap;
range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}
/*
* Create any block allocator specific components. The current allocators
* rely on using both a size-ordered range_tree_t and an array of uint64_t's.
*/
-/* ARGSUSED */
static void
metaslab_rt_create(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
size_t size;
int (*compare) (const void *, const void *);
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = metaslab_rangesize32_compare;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = metaslab_rangesize64_compare;
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
zfs_btree_create(size_tree, compare, size);
mrap->mra_floor_shift = metaslab_by_size_min_shift;
}
-/* ARGSUSED */
static void
metaslab_rt_destroy(range_tree_t *rt, void *arg)
{
+ (void) rt;
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_destroy(size_tree);
kmem_free(mrap, sizeof (*mrap));
}
-/* ARGSUSED */
static void
metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
(1 << mrap->mra_floor_shift))
return;
zfs_btree_add(size_tree, rs);
}
-/* ARGSUSED */
static void
metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
mrap->mra_floor_shift))
return;
zfs_btree_remove(size_tree, rs);
}
-/* ARGSUSED */
static void
metaslab_rt_vacate(range_tree_t *rt, void *arg)
{
metaslab_rt_arg_t *mrap = arg;
zfs_btree_t *size_tree = mrap->mra_bt;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
metaslab_rt_create(rt, arg);
}
static range_tree_ops_t metaslab_rt_ops = {
.rtop_create = metaslab_rt_create,
.rtop_destroy = metaslab_rt_destroy,
.rtop_add = metaslab_rt_add,
.rtop_remove = metaslab_rt_remove,
.rtop_vacate = metaslab_rt_vacate
};
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
zfs_btree_t *t = &msp->ms_allocatable_by_size;
range_seg_t *rs;
if (t == NULL)
return (0);
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL)
return (0);
return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
msp->ms_allocatable));
}
/*
* Return the maximum contiguous segment within the unflushed frees of this
* metaslab.
*/
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_unflushed_frees == NULL)
return (0);
if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_unflushed_frees);
range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
NULL);
if (rs == NULL)
return (0);
/*
* When a range is freed from the metaslab, that range is added to
* both the unflushed frees and the deferred frees. While the block
* will eventually be usable, if the metaslab were loaded the range
* would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
* txgs had passed. As a result, when attempting to estimate an upper
* bound for the largest currently-usable free segment in the
* metaslab, we need to not consider any ranges currently in the defer
* trees. This algorithm approximates the largest available chunk in
* the largest range in the unflushed_frees tree by taking the first
* chunk. While this may be a poor estimate, it should only remain so
* briefly and should eventually self-correct as frees are no longer
* deferred. Similar logic applies to the ms_freed tree. See
* metaslab_load() for more details.
*
* There are two primary sources of inaccuracy in this estimate. Both
* are tolerated for performance reasons. The first source is that we
* only check the largest segment for overlaps. Smaller segments may
* have more favorable overlaps with the other trees, resulting in
* larger usable chunks. Second, we only look at the first chunk in
* the largest segment; there may be other usable chunks in the
* largest segment, but we ignore them.
*/
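/*
* A minimal illustration with hypothetical offsets: if the largest
* unflushed-free segment spans [1M, 5M) and a deferred free overlaps it at
* [3M, 4M), the estimate is clipped to the 2M chunk before the overlap; if
* the overlap began exactly at 1M, the estimate would be 0.
*/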
uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
rsize, &start, &size);
if (found) {
if (rstart == start)
return (0);
rsize = start - rstart;
}
}
uint64_t start = 0;
uint64_t size = 0;
boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
rsize, &start, &size);
if (found)
rsize = start - rstart;
return (rsize);
}
static range_seg_t *
metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
uint64_t size, zfs_btree_index_t *where)
{
range_seg_t *rs;
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, start + size);
rs = zfs_btree_find(t, &rsearch, where);
if (rs == NULL) {
rs = zfs_btree_next(t, where, where);
}
return (rs);
}
#if defined(WITH_DF_BLOCK_ALLOCATOR) || \
defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* This is a helper function that can be used by the allocator to find a
* suitable block to allocate. This will search the specified B-tree looking
* for a block that matches the specified criteria.
*/
static uint64_t
metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
uint64_t max_search)
{
if (*cursor == 0)
*cursor = rt->rt_start;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
uint64_t first_found;
int count_searched = 0;
if (rs != NULL)
first_found = rs_get_start(rs, rt);
while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
max_search || count_searched < metaslab_min_search_count)) {
uint64_t offset = rs_get_start(rs, rt);
if (offset + size <= rs_get_end(rs, rt)) {
*cursor = offset + size;
return (offset);
}
rs = zfs_btree_next(bt, &where, &where);
count_searched++;
}
*cursor = 0;
return (-1ULL);
}
#endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Dynamic Fit (df) block allocator
*
* Search for a free chunk of at least this size, starting from the last
* offset (for this alignment of block) looking for up to
* metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
* found within 16MB, then return a free chunk of exactly the requested size (or
* larger).
*
* If it seems like searching from the last offset will be unproductive, skip
* that and just return a free chunk of exactly the requested size (or larger).
* This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
* mechanism is probably not very useful and may be removed in the future.
*
* The behavior when not searching can be changed to return the largest free
* chunk, instead of a free chunk of exactly the requested size, by setting
* metaslab_df_use_largest_segment.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
* bucket), but it does not prevent allocations of other sizes
* from being placed in the same region.
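*
* For example, a 24K request (an odd multiple of 8K) has an alignment of
* 8K and therefore shares a cursor bucket with any other request whose
* largest power-of-two divisor is 8K.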
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
range_tree_t *rt = msp->ms_allocatable;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
uint64_t offset;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're running low on space, find a segment based on size,
* rather than iterating based on offset.
*/
if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
offset = -1;
} else {
offset = metaslab_block_picker(rt,
cursor, size, metaslab_df_max_search);
}
if (offset == -1) {
range_seg_t *rs;
if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
if (metaslab_df_use_largest_segment) {
/* use largest free segment */
rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
} else {
zfs_btree_index_t where;
/* use segment of this size, or next largest */
rs = metaslab_block_find(&msp->ms_allocatable_by_size,
rt, msp->ms_start, size, &where);
}
if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
rt)) {
offset = rs_get_start(rs, rt);
*cursor = offset + size;
}
}
return (offset);
}
static metaslab_ops_t metaslab_df_ops = {
metaslab_df_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
range_seg_t *rs;
if (zfs_btree_numnodes(t) == 0)
metaslab_size_tree_full_load(msp->ms_allocatable);
rs = zfs_btree_last(t, NULL);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
size)
return (-1ULL);
*cursor = rs_get_start(rs, rt);
*cursor_end = rs_get_end(rs, rt);
}
offset = *cursor;
*cursor += size;
return (offset);
}
static metaslab_ops_t metaslab_cf_ops = {
metaslab_cf_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
zfs_btree_t *t = &msp->ms_allocatable->rt_root;
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_largest_allocatable(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (max_size < size)
return (-1ULL);
rs_set_start(&rsearch, rt, *cursor);
rs_set_end(&rsearch, rt, *cursor + size);
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
t = &msp->ms_allocatable_by_size;
rs_set_start(&rsearch, rt, 0);
rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
metaslab_ndf_clump_shift)));
rs = zfs_btree_find(t, &rsearch, &where);
if (rs == NULL)
rs = zfs_btree_next(t, &where, &where);
ASSERT(rs != NULL);
}
if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
*cursor = rs_get_start(rs, rt) + size;
return (rs_get_start(rs, rt));
}
return (-1ULL);
}
static metaslab_ops_t metaslab_ndf_ops = {
metaslab_ndf_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
static void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
/*
* Wait for any in-progress flushing to complete.
*/
static void
metaslab_flush_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_flushing)
cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
}
static unsigned int
metaslab_idx_func(multilist_t *ml, void *arg)
{
metaslab_t *msp = arg;
/*
* ms_id values are allocated sequentially, so a full 64-bit
* division would be a waste of time; limit it to 32 bits.
*/
return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
}
uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
return (msp->ms_allocated_space);
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocating = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_condensing);
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an
* allocated space map. Calling this in non-syncing context
* does not provide a consistent view of the metaslab since
* we're performing allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
/*
* Even though the smp_alloc field can go negative in general,
* for a metaslab's space map that should never be the case.
*/
ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
ASSERT3U(space_map_allocated(msp->ms_sm), >=,
range_tree_space(msp->ms_unflushed_frees));
ASSERT3U(metaslab_allocated_space(msp), ==,
space_map_allocated(msp->ms_sm) +
range_tree_space(msp->ms_unflushed_allocs) -
range_tree_space(msp->ms_unflushed_frees));
sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
/*
* Account for future allocations since we would have
* already deducted that space from the ms_allocatable.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocating +=
range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
}
ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
msp->ms_allocating_total);
ASSERT3U(msp->ms_deferspace, ==,
range_tree_space(msp->ms_defer[0]) +
range_tree_space(msp->ms_defer[1]));
msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
msp->ms_deferspace + range_tree_space(msp->ms_freed);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
/*
* Auxiliary histograms are only cleared when resetting them,
* which can only happen while the metaslab is loaded.
*/
ASSERT(msp->ms_loaded);
bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
for (int t = 0; t < TXG_DEFER_SIZE; t++)
bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
}
static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
range_tree_t *rt)
{
/*
* This is modeled after space_map_histogram_add(), so refer to that
* function for implementation details. We want this to work like
* the space map histogram, and not the range tree histogram, as we
* are essentially constructing a delta that will be later subtracted
* from the space map histogram.
*/
int idx = 0;
for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT3U(i, >=, idx + shift);
histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
/*
* Called at every sync pass that the metaslab gets synced.
*
* The reason is that we want our auxiliary histograms to be updated
* whenever the metaslab's space map histogram is updated. This way
* we stay consistent on which parts of the metaslab space map's
* histogram are currently not available for allocations (e.g. because
* they are in the defer, freed, and freeing trees).
*/
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(sm != NULL);
/*
* This is similar to the metaslab's space map histogram updates
* that take place in metaslab_sync(). The only difference is that
* we only care about segments that haven't made it into the
* ms_allocatable tree yet.
*/
if (msp->ms_loaded) {
metaslab_aux_histograms_clear(msp);
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freed);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
metaslab_aux_histogram_add(msp->ms_deferhist[t],
sm->sm_shift, msp->ms_defer[t]);
}
}
metaslab_aux_histogram_add(msp->ms_synchist,
sm->sm_shift, msp->ms_freeing);
}
/*
* Called every time we are done syncing (writing to) the metaslab,
* i.e. at the end of each sync pass.
* [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
*/
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
if (sm == NULL) {
/*
* We came here from metaslab_init() when creating/opening a
* pool, looking at a metaslab that hasn't had any allocations
* yet.
*/
return;
}
/*
* This is similar to the actions that we take for the ms_freed
* and ms_defer trees in metaslab_sync_done().
*/
uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
if (defer_allowed) {
bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
sizeof (msp->ms_synchist));
} else {
bzero(msp->ms_deferhist[hist_index],
sizeof (msp->ms_deferhist[hist_index]));
}
bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
}
/*
* Ensure that the metaslab's weight and fragmentation are consistent
* with the contents of the histogram (either the range tree's histogram
* or the space map's depending whether the metaslab is loaded).
*/
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can end up here from vdev_remove_complete(), in which case we
* cannot do these assertions because we hold spa config locks and
* thus we are not allowed to read from the DMU.
*
* We check if the metaslab group has been removed and if that's
* the case we return immediately as that would mean that we are
* here from the aforementioned code path.
*/
if (msp->ms_group == NULL)
return;
/*
* Devices being removed always return a weight of 0 and leave
* fragmentation and ms_max_size as is - there is nothing for
* us to verify here.
*/
vdev_t *vd = msp->ms_group->mg_vd;
if (vd->vdev_removing)
return;
/*
* If the metaslab is dirty it probably means that we've done
* some allocations or frees that have changed our histograms
* and thus the weight.
*/
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&vd->vdev_ms_list, msp, t))
return;
}
/*
* This verification checks that our in-memory state is consistent
* with what's on disk. If the pool is read-only then there aren't
* any changes and we just have the initially-loaded state.
*/
if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
return;
/* some extra verification for in-core tree if you can */
if (msp->ms_loaded) {
range_tree_stat_verify(msp->ms_allocatable);
VERIFY(space_map_histogram_verify(msp->ms_sm,
msp->ms_allocatable));
}
uint64_t weight = msp->ms_weight;
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
uint64_t frag = msp->ms_fragmentation;
uint64_t max_segsize = msp->ms_max_size;
msp->ms_weight = 0;
msp->ms_fragmentation = 0;
/*
* This function is used for verification purposes and thus should
* not introduce any side-effects/mutations on the system's state.
*
* Regardless of whether metaslab_weight() thinks this metaslab
* should be active or not, we want to ensure that the actual weight
* (and therefore the value of ms_weight) would be the same if it
* was to be recalculated at this point.
*
* In addition we set the nodirty flag so metaslab_weight() does
* not dirty the metaslab for future TXGs (e.g. when trying to
* force condensing to upgrade the metaslab spacemaps).
*/
msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
VERIFY3U(max_segsize, ==, msp->ms_max_size);
/*
* If the weight type changed then there is no point in doing
* verification. Revert fields to their original values.
*/
if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
(!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
msp->ms_fragmentation = frag;
msp->ms_weight = weight;
return;
}
VERIFY3U(msp->ms_fragmentation, ==, frag);
VERIFY3U(msp->ms_weight, ==, weight);
}
/*
* If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
* this class that was used longest ago, and attempt to unload it. To
* avoid degrading performance we don't want to spend too much time in
* this loop, and we expect that most of the time this operation will
* succeed. Between that and the normal unloading processing during txg sync,
* we expect this to keep the metaslab memory usage under control.
*/
static void
metaslab_potentially_evict(metaslab_class_t *mc)
{
#ifdef _KERNEL
uint64_t allmem = arc_all_memory();
uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
int tries = 0;
for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
tries++) {
unsigned int idx = multilist_get_random_index(
&mc->mc_metaslab_txg_list);
multilist_sublist_t *mls =
multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
metaslab_t *msp = multilist_sublist_head(mls);
multilist_sublist_unlock(mls);
while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
inuse * size) {
VERIFY3P(mls, ==, multilist_sublist_lock(
&mc->mc_metaslab_txg_list, idx));
ASSERT3U(idx, ==,
metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
if (!multilist_link_active(&msp->ms_class_txg_node)) {
multilist_sublist_unlock(mls);
break;
}
metaslab_t *next_msp = multilist_sublist_next(mls, msp);
multilist_sublist_unlock(mls);
/*
* If the metaslab is currently loading there are two
* cases. If it's the metaslab we're evicting, we
* can't continue on or we'll panic when we attempt to
* recursively lock the mutex. If it's another
* metaslab that's loading, it can be safely skipped,
* since we know it's very new and therefore not a
* good eviction candidate. We check later once the
* lock is held that the metaslab is fully loaded
* before actually unloading it.
*/
if (msp->ms_loading) {
msp = next_msp;
inuse =
spl_kmem_cache_inuse(zfs_btree_leaf_cache);
continue;
}
/*
* We can't unload metaslabs with no spacemap because
* they're not ready to be unloaded yet. We can't
* unload metaslabs with outstanding allocations
* because doing so could cause the metaslab's weight
* to decrease while it's unloaded, which violates an
* invariant that we use to prevent unnecessary
* loading. We also don't unload metaslabs that are
* currently active because they are high-weight
* metaslabs that are likely to be used in the near
* future.
*/
mutex_enter(&msp->ms_lock);
if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
msp->ms_allocating_total == 0) {
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
msp = next_msp;
inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
}
}
+#else
+ (void) mc;
#endif
}
static int
metaslab_load_impl(metaslab_t *msp)
{
int error = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We temporarily drop the lock to unblock other operations while we
* are reading the space map. Therefore, metaslab_sync() and
* metaslab_sync_done() can run at the same time as we do.
*
* If we are using the log space maps, metaslab_sync() can't write to
* the metaslab's space map while we are loading as we only write to
* it when we are flushing the metaslab, and that can't happen while
* we are loading it.
*
* If we are not using log space maps though, metaslab_sync() can
* append to the space map while we are loading. Therefore we load
* only entries that existed when we started the load. Additionally,
* metaslab_sync_done() has to wait for the load to complete because
* there are potential races like metaslab_load() loading parts of the
* space map that are currently being appended by metaslab_sync(). If
* we didn't, the ms_allocatable would have entries that
* metaslab_sync_done() would try to re-add later.
*
* That's why before dropping the lock we remember the synced length
* of the metaslab and read up to that point of the space map,
* ignoring entries appended by metaslab_sync() that happen after we
* drop the lock.
*/
uint64_t length = msp->ms_synced_length;
mutex_exit(&msp->ms_lock);
hrtime_t load_start = gethrtime();
metaslab_rt_arg_t *mrap;
if (msp->ms_allocatable->rt_arg == NULL) {
mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
} else {
mrap = msp->ms_allocatable->rt_arg;
msp->ms_allocatable->rt_ops = NULL;
msp->ms_allocatable->rt_arg = NULL;
}
mrap->mra_bt = &msp->ms_allocatable_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
if (msp->ms_sm != NULL) {
error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
SM_FREE, length);
/* Now, populate the size-sorted tree. */
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
struct mssa_arg arg = {0};
arg.rt = msp->ms_allocatable;
arg.mra = mrap;
range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
&arg);
} else {
/*
* Add the size-sorted tree first, since we don't need to load
* the metaslab from the spacemap.
*/
metaslab_rt_create(msp->ms_allocatable, mrap);
msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
msp->ms_allocatable->rt_arg = mrap;
/*
* The space map has not been allocated yet, so treat
* all the space in the metaslab as free and add it to the
* ms_allocatable tree.
*/
range_tree_add(msp->ms_allocatable,
msp->ms_start, msp->ms_size);
if (msp->ms_new) {
/*
* If the ms_sm doesn't exist, this means that this
* metaslab hasn't gone through metaslab_sync() and
* thus has never been dirtied. So we shouldn't
* expect any unflushed allocs or frees from previous
* TXGs.
*/
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
}
/*
* We need to grab the ms_sync_lock to prevent metaslab_sync() from
* changing the ms_sm (or log_sm) and the metaslab's range trees
* while we are about to use them and populate the ms_allocatable.
* The ms_lock is insufficient for this because metaslab_sync() doesn't
* hold the ms_lock while writing the ms_checkpointing tree to disk.
*/
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
ASSERT(!msp->ms_condensing);
ASSERT(!msp->ms_flushing);
if (error != 0) {
mutex_exit(&msp->ms_sync_lock);
return (error);
}
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
/*
* Apply all the unflushed changes to ms_allocatable right
* away so any manipulations we do below have a clear view
* of what is allocated and what is free.
*/
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_remove, msp->ms_allocatable);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_add, msp->ms_allocatable);
ASSERT3P(msp->ms_group, !=, NULL);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (spa_syncing_log_sm(spa) != NULL) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_LOG_SPACEMAP));
/*
* If we use a log space map we add all the segments
* that are in ms_unflushed_frees so they are available
* for allocation.
*
* ms_allocatable needs to contain all free segments
* that are ready for allocations (thus not segments
* from ms_freeing, ms_freed, and the ms_defer trees).
* But if we grab the lock in this code path at a sync
* pass later than 1, then it also contains the
* segments of ms_freed (they were added to it earlier
* in this path through ms_unflushed_frees). So we
* need to remove all the segments that exist in
* ms_freed from ms_allocatable as they will be added
* later in metaslab_sync_done().
*
* When there's no log space map, the ms_allocatable
* correctly doesn't contain any segments that exist
* in ms_freed [see ms_synced_length].
*/
range_tree_walk(msp->ms_freed,
range_tree_remove, msp->ms_allocatable);
}
/*
* If we are not using the log space map, ms_allocatable
* contains the segments that exist in the ms_defer trees
* [see ms_synced_length]. Thus we need to remove them
* from ms_allocatable as they will be added again in
* metaslab_sync_done().
*
* If we are using the log space map, ms_allocatable still
* contains the segments that exist in the ms_defer trees.
* This is not because it read them through the ms_sm, but
* because these segments are part of ms_unflushed_frees,
* whose segments we added to ms_allocatable earlier in
* this code path.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_remove, msp->ms_allocatable);
}
/*
* Call metaslab_recalculate_weight_and_sort() now that the
* metaslab is loaded so we get the metaslab's real weight.
*
* Unless this metaslab was created with older software and
* has not yet been converted to use segment-based weight, we
* expect the new weight to be better or equal to the weight
* that the metaslab had while it was not loaded. This is
* because the old weight does not take into account the
* consolidation of adjacent segments between TXGs. [see
* comment for ms_synchist and ms_deferhist[] for more info]
*/
uint64_t weight = msp->ms_weight;
uint64_t max_size = msp->ms_max_size;
metaslab_recalculate_weight_and_sort(msp);
if (!WEIGHT_IS_SPACEBASED(weight))
ASSERT3U(weight, <=, msp->ms_weight);
msp->ms_max_size = metaslab_largest_allocatable(msp);
ASSERT3U(max_size, <=, msp->ms_max_size);
hrtime_t load_end = gethrtime();
msp->ms_load_time = load_end;
zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, smp_length %llu, "
"unflushed_allocs %llu, unflushed_frees %llu, "
"freed %llu, defer %llu + %llu, unloaded time %llu ms, "
"loading_time %lld ms, ms_max_size %llu, "
"max size error %lld, "
"old_weight %llx, new_weight %llx",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)range_tree_space(msp->ms_freed),
(u_longlong_t)range_tree_space(msp->ms_defer[0]),
(u_longlong_t)range_tree_space(msp->ms_defer[1]),
(longlong_t)((load_start - msp->ms_unload_time) / 1000000),
(longlong_t)((load_end - load_start) / 1000000),
(u_longlong_t)msp->ms_max_size,
(u_longlong_t)msp->ms_max_size - max_size,
(u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
metaslab_verify_space(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_sync_lock);
return (0);
}
int
metaslab_load(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* There may be another thread loading the same metaslab, if that's
* the case just wait until the other thread is done and return.
*/
metaslab_load_wait(msp);
if (msp->ms_loaded)
return (0);
VERIFY(!msp->ms_loading);
ASSERT(!msp->ms_condensing);
/*
* We set the loading flag BEFORE potentially dropping the lock to
* wait for an ongoing flush (see ms_flushing below). This way other
* threads know that there is already a thread that is loading this
* metaslab.
*/
msp->ms_loading = B_TRUE;
/*
* Wait for any in-progress flushing to finish as we drop the ms_lock
* both here (during space_map_load()) and in metaslab_flush() (when
* we flush our changes to the ms_sm).
*/
if (msp->ms_flushing)
metaslab_flush_wait(msp);
/*
* If we were waiting for the metaslab to be flushed (where we
* temporarily dropped the ms_lock), ensure that no one else
* loaded the metaslab in the meantime.
*/
ASSERT(!msp->ms_loaded);
/*
* If we're loading a metaslab in the normal class, consider evicting
* another one to keep our memory usage under the limit defined by the
* zfs_metaslab_mem_limit tunable.
*/
if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
msp->ms_group->mg_class) {
metaslab_potentially_evict(msp->ms_group->mg_class);
}
int error = metaslab_load_impl(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
msp->ms_loading = B_FALSE;
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* This can happen if a metaslab is selected for eviction (in
* metaslab_potentially_evict) and then unloaded during spa_sync (via
* metaslab_class_evict_old).
*/
if (!msp->ms_loaded)
return;
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_unload_time = gethrtime();
msp->ms_activation_weight = 0;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
if (msp->ms_group != NULL) {
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
multilist_sublist_unlock(mls);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, weight %llx, "
"selected txg %llu (%llu ms ago), alloc_txg %llu, "
"loaded %llu ms ago, max_size %llu",
(u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)msp->ms_weight,
(u_longlong_t)msp->ms_selected_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_selected_time) / 1000 / 1000,
(u_longlong_t)msp->ms_alloc_txg,
(u_longlong_t)(msp->ms_unload_time -
msp->ms_load_time) / 1000 / 1000,
(u_longlong_t)msp->ms_max_size);
}
/*
* We explicitly recalculate the metaslab's weight based on its space
* map (as it is now not loaded). We want unloaded metaslabs to always
* have their weight calculated from the space map histograms, while
* loaded ones have it calculated from their in-core range tree
* [see metaslab_load()]. This way, the weight reflects the information
* available in-core, whether it is loaded or not.
*
* If ms_group == NULL, we came here from metaslab_fini(), at
* which point it doesn't make sense for us to do the
* recalculation and the sorting.
*/
if (msp->ms_group != NULL)
metaslab_recalculate_weight_and_sort(msp);
}
/*
* We want to optimize the memory use of the per-metaslab range
* trees. To do this, we store the segments in the range trees in
* units of sectors, zero-indexing from the start of the metaslab. If
* the vdev_ms_shift - the vdev_ashift is less than 32, we can store
* the ranges using two uint32_ts, rather than two uint64_ts.
*/
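/*
* For example (hypothetical geometry), a vdev with a vdev_ms_shift of 34
* (16G metaslabs) and an ashift of 12 spans 2^22 sectors per metaslab, so
* offsets relative to ms_start fit in 32 bits and RANGE_SEG32 is used.
*/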
range_seg_type_t
metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
uint64_t *start, uint64_t *shift)
{
if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
!zfs_metaslab_force_large_segs) {
*shift = vdev->vdev_ashift;
*start = msp->ms_start;
return (RANGE_SEG32);
} else {
*shift = 0;
*start = 0;
return (RANGE_SEG64);
}
}
void
metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (multilist_link_active(&msp->ms_class_txg_node))
multilist_sublist_remove(mls, msp);
msp->ms_selected_txg = txg;
msp->ms_selected_time = gethrtime();
multilist_sublist_insert_tail(mls, msp);
multilist_sublist_unlock(mls);
}
void
metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta)
{
vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
vdev_deflated_space(vd, space_delta));
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
uint64_t txg, metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
multilist_link_init(&ms->ms_class_txg_node);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
ms->ms_allocator = -1;
ms->ms_new = B_TRUE;
vdev_ops_t *ops = vd->vdev_ops;
if (ops->vdev_op_metaslab_init != NULL)
ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
/*
* We only open space map objects that already exist. All others
* will be opened when we finally allocate an object for them.
*
* Note:
* When called from vdev_expand(), we can't call into the DMU as
* we are holding the spa_config_lock as a writer and we would
* deadlock [see relevant comment in vdev_metaslab_init()]. In
* that case, however, the object parameter is zero, so we won't
* call into the DMU.
*/
if (object != 0) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
}
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_SIZE; t++) {
ms->ms_allocating[t] = range_tree_create(NULL, type,
NULL, start, shift);
}
ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
start, shift);
}
ms->ms_checkpointing =
range_tree_create(NULL, type, NULL, start, shift);
ms->ms_unflushed_allocs =
range_tree_create(NULL, type, NULL, start, shift);
metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
mrap->mra_floor_shift = metaslab_by_size_min_shift;
ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
type, mrap, start, shift);
ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms, B_FALSE);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL) {
metaslab_sync_done(ms, 0);
metaslab_space_update(vd, mg->mg_class,
metaslab_allocated_space(ms), 0, 0);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
static void
metaslab_fini_flush_data(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (metaslab_unflushed_txg(msp) == 0) {
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
==, NULL);
return;
}
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
}
uint64_t
metaslab_unflushed_changes_memused(metaslab_t *ms)
{
return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
range_tree_numsegs(ms->ms_unflushed_frees)) *
ms->ms_unflushed_allocs->rt_root.bt_elem_size);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
metaslab_fini_flush_data(msp);
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
/*
* If this metaslab hasn't been through metaslab_sync_done() yet, its
* space hasn't been accounted for in its vdev and doesn't need to be
* subtracted.
*/
if (!msp->ms_new) {
metaslab_space_update(vd, mg->mg_class,
-metaslab_allocated_space(msp), 0, -msp->ms_size);
}
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
metaslab_unload(msp);
range_tree_destroy(msp->ms_allocatable);
range_tree_destroy(msp->ms_freeing);
range_tree_destroy(msp->ms_freed);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_allocs);
range_tree_destroy(msp->ms_checkpointing);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
range_tree_destroy(msp->ms_unflushed_frees);
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_allocating[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defer[t]);
}
ASSERT0(msp->ms_deferspace);
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
range_tree_vacate(msp->ms_trim, NULL, NULL);
range_tree_destroy(msp->ms_trim);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
cv_destroy(&msp->ms_flush_cv);
mutex_destroy(&msp->ms_lock);
mutex_destroy(&msp->ms_sync_lock);
ASSERT3U(msp->ms_allocator, ==, -1);
kmem_free(msp, sizeof (metaslab_t));
}
#define FRAGMENTATION_TABLE_SIZE 17
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
* large segments. A 10% change in fragmentation equates to approximately
* double the number of segments.
*
* This table defines 0% fragmented space using 16MB segments. Testing has
* shown that segments that are greater than or equal to 16MB do not suffer
* from drastic performance problems. Using this value, we derive the rest
* of the table. Since the fragmentation value is never stored on disk, it
* is possible to change these calculations in the future.
*/
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
100, /* 512B */
100, /* 1K */
98, /* 2K */
95, /* 4K */
90, /* 8K */
80, /* 16K */
70, /* 32K */
60, /* 64K */
50, /* 128K */
40, /* 256K */
30, /* 512K */
20, /* 1M */
15, /* 2M */
10, /* 4M */
5, /* 8M */
0 /* 16M */
};
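/*
* A worked example: if a metaslab's free space is split evenly between 512K
* segments (metric 30) and 4M segments (metric 10), its fragmentation is
* (30 + 10) / 2 = 20. Free space made up entirely of segments of 16M or
* larger yields a fragmentation of 0.
*/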
/*
* Calculate the metaslab's fragmentation metric and set ms_fragmentation.
* Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
* been upgraded and does not support this metric. Otherwise,
* the value will be in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported. We also skip marking this metaslab for
* condensing if the caller has explicitly set nodirty.
*/
if (!nodirty &&
spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
zfs_dbgmsg("txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
(u_longlong_t)msp->ms_id,
(u_longlong_t)vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - metaslab_allocated_space(msp);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
* has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
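/*
 * Fold counts from larger buckets down as we go: a segment in
 * bucket i+1 is at least twice the size of one in bucket i, so
 * it counts as two segments at the lower index.
 */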
segments <<= 1;
segments += msp->ms_allocatable->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Calculate the weight based on the on-disk histogram. Should be applied
* only to unloaded metaslabs (i.e. no incoming allocations) in order to
* give results consistent with the on-disk state.
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
ASSERT(!msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(space_map_object(sm), !=, 0);
ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* Create a joint histogram from all the segments that have made
* it to the metaslab's space map histogram, that are not yet
* available for allocation because they are still in the freeing
* pipeline (e.g. freeing, freed, and defer trees). Then subtract
* these segments from the space map's histogram to get a more
* accurate weight.
*/
uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
deferspace_histogram[i] += msp->ms_synchist[i];
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
deferspace_histogram[i] += msp->ms_deferhist[t][i];
}
}
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
deferspace_histogram[i]);
uint64_t count =
sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
if (count != 0) {
WEIGHT_SET_COUNT(weight, count);
WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Compute a segment-based weight for the specified metaslab. The weight
* is determined by the highest bucket in the histogram. The information
* for the highest bucket is encoded into the weight value.
*/
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (metaslab_allocated_space(msp) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
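/*
 * Encode the whole metaslab as one free segment. If the metaslab
 * is larger than the largest histogram bucket, express it as a
 * count of max-bucket-sized segments instead.
 */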
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (metaslab_allocated_space(msp) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab is loaded, then we can determine if the desired allocation
* can be satisfied by looking at the size of the maximum free segment
* on that metaslab. Otherwise, we make our decision based on the metaslab's
* weight. For segment-based weighting we can determine the maximum
* allocation based on the index encoded in its value. For space-based
* weights we rely on the entire weight (excluding the weight-type bit).
*/
static boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
{
/*
* If the metaslab is loaded, ms_max_size is definitive and we can use
* the fast check. If it's not, the ms_max_size is a lower bound (once
* set), and we should use the fast check as long as we're not in
* try_hard and it's been less than zfs_metaslab_max_size_cache_sec
* seconds since the metaslab was unloaded.
*/
if (msp->ms_loaded ||
(msp->ms_max_size != 0 && !try_hard && gethrtime() <
msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
return (msp->ms_max_size >= asize);
boolean_t should_allocate;
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
metaslab_set_fragmentation(msp, nodirty);
/*
* Update the maximum size. If the metaslab is loaded, this will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree. If the metaslab is
* unloaded, we check if there's a larger free segment in the
* unflushed frees. This is a lower bound on the largest allocatable
* segment size. Coalescing of adjacent entries may reveal larger
* allocatable segments, but we aren't aware of those until loading
* the space map into a range tree.
*/
if (msp->ms_loaded) {
msp->ms_max_size = metaslab_largest_allocatable(msp);
} else {
msp->ms_max_size = MAX(msp->ms_max_size,
metaslab_largest_unflushed_free(msp));
}
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/* note: we preserve the mask (e.g. indication of primary, etc..) */
uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(msp->ms_group, msp,
metaslab_weight(msp, B_FALSE) | was_active);
}
static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
int allocator, uint64_t activation_weight)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* If we're activating for the claim code, we don't want to actually
* set the metaslab up for a specific allocator.
*/
if (activation_weight == METASLAB_WEIGHT_CLAIM) {
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(mg, msp, msp->ms_weight |
activation_weight);
return (0);
}
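/*
 * Otherwise claim the allocator's primary or secondary slot for
 * this metaslab. If the slot is already occupied by another
 * metaslab, return EEXIST; the caller may still allocate from
 * this metaslab without activating it.
 */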
metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
&mga->mga_primary : &mga->mga_secondary);
mutex_enter(&mg->mg_lock);
if (*mspp != NULL) {
mutex_exit(&mg->mg_lock);
return (EEXIST);
}
*mspp = msp;
ASSERT3S(msp->ms_allocator, ==, -1);
msp->ms_allocator = allocator;
msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
ASSERT0(msp->ms_activation_weight);
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort_impl(mg, msp,
msp->ms_weight | activation_weight);
mutex_exit(&mg->mg_lock);
return (0);
}
static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The current metaslab is already activated for us so there
* is nothing to do. Being activated, though, doesn't mean
* that this metaslab is activated for our allocator or with our
* requested activation weight. The metaslab could have started
* as an active one for our allocator but changed allocators
* while we were waiting to grab its ms_lock, or we stole it
* [see find_valid_metaslab()]. This means that this thread may
* end up passivating a metaslab that belongs to another
* allocator or carries a different activation mask.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
ASSERT(msp->ms_loaded);
return (0);
}
int error = metaslab_load(msp);
if (error != 0) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
/*
* When entering metaslab_load() we may have dropped the
* ms_lock because we were loading this metaslab, or we
* were waiting for another thread to load it for us. In
* that scenario, we recheck the weight of the metaslab
* to see if it was activated by another thread.
*
* If the metaslab was activated for another allocator or
* it was activated with a different activation weight (e.g.
* we wanted to make it a primary but it was activated as
* secondary) we return error (EBUSY).
*
* If the metaslab was activated for the same allocator
* and requested activation mask, skip activating it.
*/
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
if (msp->ms_allocator != allocator)
return (EBUSY);
if ((msp->ms_weight & activation_weight) == 0)
return (SET_ERROR(EBUSY));
EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
msp->ms_primary);
return (0);
}
/*
* If the metaslab has literally 0 space, it will have weight 0. In
* that case, don't bother activating it. This can happen if the
* metaslab had space during find_valid_metaslab, but another thread
* loaded it and used all that space while we were waiting to grab the
* lock.
*/
if (msp->ms_weight == 0) {
ASSERT0(range_tree_space(msp->ms_allocatable));
return (SET_ERROR(ENOSPC));
}
if ((error = metaslab_activate_allocator(msp->ms_group, msp,
allocator, activation_weight)) != 0) {
return (error);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
uint64_t weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
metaslab_group_sort(mg, msp, weight);
return;
}
mutex_enter(&mg->mg_lock);
ASSERT3P(msp->ms_group, ==, mg);
ASSERT3S(0, <=, msp->ms_allocator);
ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
if (msp->ms_primary) {
ASSERT3P(mga->mga_primary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
mga->mga_primary = NULL;
} else {
ASSERT3P(mga->mga_secondary, ==, msp);
ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
mga->mga_secondary = NULL;
}
msp->ms_allocator = -1;
metaslab_group_sort_impl(mg, msp, weight);
mutex_exit(&mg->mg_lock);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_allocatable) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
ASSERT(msp->ms_activation_weight != 0);
msp->ms_activation_weight = 0;
metaslab_passivate_allocator(msp->ms_group, msp, weight);
ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more blocks and cause more sync passes.
*/
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
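/*
 * Passivate if the largest free segment has dropped by at least
 * zfs_metaslab_switch_threshold histogram buckets since activation.
 */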
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
metaslab_class_t *mc = msp->ms_group->mg_class;
spa_t *spa = mc->mc_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
mutex_enter(&msp->ms_lock);
(void) metaslab_load(msp);
metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
taskq_wait_outstanding(mg->mg_taskq, 0);
return;
}
mutex_enter(&mg->mg_lock);
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
ASSERT3P(msp->ms_group, ==, mg);
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
msp, TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance for
* inefficiency. We would like to use the following criteria to make our
* decision:
*
* 1. Do not condense if the size of the space map object would dramatically
* increase as a result of writing out the free space range tree.
*
* 2. Condense if the on-disk space map representation is at least
* zfs_condense_pct/100 times the size of the optimal representation
* (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
*
* 3. Do not condense if the on-disk size of the space map does not actually
* decrease.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
vdev_t *vd = msp->ms_group->mg_vd;
uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(sm != NULL);
ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
/*
* We always condense metaslabs that are empty and metaslabs for
* which a condense request has been made.
*/
if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
msp->ms_condense_wanted)
return (B_TRUE);
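/*
 * Otherwise apply the criteria above: condense only if the on-disk
 * space map is at least zfs_condense_pct/100 times its estimated
 * optimal size and its length exceeds the block-count threshold
 * heuristic (zfs_metaslab_condense_block_threshold blocks).
 */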
uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
uint64_t object_size = space_map_length(sm);
uint64_t optimal_size = space_map_estimate_optimal_size(sm,
msp->ms_allocatable, SM_NO_VDEVID);
return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed
* by the entries of the free range tree (ms_allocatable). The condensed
* spacemap contains all the entries of previous TXGs (including those in
* the pool-wide log spacemaps; thus this is effectively a superset of
* metaslab_flush()), but this TXG's entries still need to be written.
*/
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_sm != NULL);
/*
* In order to condense the space map, we need to change it so it
* only describes which segments are currently allocated and free.
*
* All the current free space resides in the ms_allocatable, all
* the ms_defer trees, and all the ms_allocating trees. We ignore
* ms_freed because it is empty because we're in sync pass 1. We
* ignore ms_freeing because these changes are not yet reflected
* in the spacemap (they will be written later this txg).
*
* So to truncate the space map to represent all the entries of
* previous TXGs we do the following:
*
* 1] We create a range tree (condense tree) that is 100% empty.
* 2] We add to it all segments found in the ms_defer trees
* as those segments are marked as free in the original space
* map. We do the same with the ms_allocating trees for the same
* reason. Adding these segments should be a relatively
* inexpensive operation since we expect these trees to have a
* small number of nodes.
* 3] We vacate any unflushed allocs, since they are not frees we
* need to add to the condense tree. Then we vacate any
* unflushed frees as they should already be part of ms_allocatable.
* 4] At this point, we would ideally like to add all segments
* in the ms_allocatable tree to the condense tree. This way
* we would write all the entries of the condense tree as the
* condensed space map, which would only contain freed
* segments with everything else assumed to be allocated.
*
* Doing so can be prohibitively expensive as ms_allocatable can
* be large, and therefore computationally expensive to add to
* the condense_tree. Instead we first sync out an entry marking
* everything as allocated, then the condense_tree and then the
* ms_allocatable, in the condensed space map. While this is not
* optimal, it is typically close to optimal and more importantly
* much cheaper to compute.
*
* 5] Finally, as both of the unflushed trees were written to our
* new and condensed metaslab space map, we basically flushed
* all the unflushed changes to disk, thus we call
* metaslab_flush_update().
*/
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
"spa %s, smp size %llu, segments %llu, forcing condense=%s",
(u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
(u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
range_seg_type_t type;
uint64_t shift, start;
type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
&start, &shift);
condense_tree = range_tree_create(NULL, type, NULL, start, shift);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defer[t],
range_tree_add, condense_tree);
}
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
range_tree_add, condense_tree);
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
/*
* We're about to drop the metaslab's lock, thus allowing other
* consumers to change its content. Set the metaslab's ms_condensing
* flag to ensure that allocations on this metaslab do not occur
* while we're in the middle of committing it to disk. This is only
* critical for ms_allocatable as all other range trees use per TXG
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
uint64_t object = space_map_object(msp->ms_sm);
space_map_truncate(sm,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
/*
* space_map_truncate() may have reallocated the spacemap object.
* If so, update the vdev_ms_array.
*/
if (space_map_object(msp->ms_sm) != object) {
object = space_map_object(msp->ms_sm);
dmu_write(spa->spa_meta_objset,
msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
/*
* Note:
* When the log space map feature is enabled, each space map will
* always have ALLOCS followed by FREES for each sync pass. This is
* typically true even when the log space map feature is disabled,
* except from the case where a metaslab goes through metaslab_sync()
* and gets condensed. In that case the metaslab's space map will have
* ALLOCS followed by FREES (due to condensing) followed by ALLOCS
* followed by FREES (due to space_map_write() in metaslab_sync()) for
* sync pass 1.
*/
range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
shift);
range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
range_tree_vacate(tmp_tree, NULL, NULL);
range_tree_destroy(tmp_tree);
mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
metaslab_flush_update(msp, tx);
}
/*
* Called when the metaslab has been flushed (its own spacemap now reflects
* all the contents of the pool-wide spacemap log). Updates the metaslab's
* metadata and any pool-wide related log space map data (e.g. summary,
* obsolete logs, etc..) to reflect that.
*/
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
/*
* Just because a metaslab got flushed, that doesn't mean that
* it will pass through metaslab_sync_done(). Thus, make sure to
* update ms_synced_length here in case it doesn't.
*/
msp->ms_synced_length = space_map_length(msp->ms_sm);
/*
* We may end up here from metaslab_condense() without the
* feature being active. In that case this is a no-op.
*/
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
ASSERT(spa_syncing_log_sm(spa) != NULL);
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
/* update metaslab's position in our flushing tree */
uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_remove(&spa->spa_metaslabs_by_flushed, msp);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
/* update metaslab counts of spa_log_sm_t nodes */
spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_sm_increment_current_mscount(spa);
/* cleanup obsolete logs if any */
uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
spa_cleanup_old_sm_logs(spa, tx);
uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
VERIFY3U(log_blocks_after, <=, log_blocks_before);
/* update log space map summary */
uint64_t blocks_gone = log_blocks_before - log_blocks_after;
spa_log_summary_add_flushed_metaslab(spa);
spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
spa_log_summary_decrement_blkcount(spa, blocks_gone);
}
boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
ASSERT(msp->ms_sm != NULL);
ASSERT(metaslab_unflushed_txg(msp) != 0);
ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
/*
* There is nothing wrong with flushing the same metaslab twice, as
* this codepath should work in that case. However, the current
* flushing scheme makes sure to avoid this situation as we would be
* making all these calls without having anything meaningful to write
* to disk. We assert this behavior here.
*/
ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
/*
* We cannot flush while loading, because then we would
* not load the ms_unflushed_{allocs,frees}.
*/
if (msp->ms_loading)
return (B_FALSE);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
/*
* Metaslab condensing is effectively flushing. Therefore if the
* metaslab can be condensed we can just condense it instead of
* flushing it.
*
* Note that metaslab_condense() does call metaslab_flush_update()
* so we can just return immediately after condensing. We also
* don't need to care about setting ms_flushing or broadcasting
* ms_flush_cv, even if we temporarily drop the ms_lock in
* metaslab_condense(), as the metaslab is already loaded.
*/
if (msp->ms_loaded && metaslab_should_condense(msp)) {
metaslab_group_t *mg = msp->ms_group;
/*
* For all histogram operations below refer to the
* comments of metaslab_sync() where we follow a
* similar procedure.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
metaslab_condense(msp, tx);
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
ASSERT(range_tree_is_empty(msp->ms_freed));
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
/*
* Since we recreated the histogram (and potentially
* the ms_sm too while condensing) ensure that the
* weight is updated too because we are not guaranteed
* that this metaslab is dirty and will go through
* metaslab_sync_done().
*/
metaslab_recalculate_weight_and_sort(msp);
return (B_TRUE);
}
msp->ms_flushing = B_TRUE;
uint64_t sm_len_before = space_map_length(msp->ms_sm);
mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
uint64_t sm_len_after = space_map_length(msp->ms_sm);
if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
"ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
"appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
spa_name(spa),
(u_longlong_t)msp->ms_group->mg_vd->vdev_id,
(u_longlong_t)msp->ms_id,
(u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
(u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
(u_longlong_t)(sm_len_after - sm_len_before));
}
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
metaslab_flush_update(msp, tx);
metaslab_verify_space(msp, dmu_tx_get_txg(tx));
metaslab_verify_weight_and_frag(msp);
msp->ms_flushing = B_FALSE;
cv_broadcast(&msp->ms_flush_cv);
return (B_TRUE);
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
dmu_tx_t *tx;
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_new) {
ASSERT0(range_tree_space(alloctree));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
ASSERT0(range_tree_space(msp->ms_trim));
return;
}
/*
* Normally, we don't want to process a metaslab if there are no
* allocations or frees to perform. However, if the metaslab is being
* forced to condense, it's loaded and we're not beyond the final
* dirty txg, we need to let it through. Not condensing beyond the
* final dirty txg prevents an issue where metaslabs that need to be
* condensed but were loaded for other reasons could cause a panic
* here. By only checking the txg in that branch of the conditional,
* we preserve the utility of the VERIFY statements in all other
* cases.
*/
if (range_tree_is_empty(alloctree) &&
range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing) &&
!(msp->ms_loaded && msp->ms_condense_wanted &&
txg <= spa_final_dirty_txg(spa)))
return;
VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently
* with metaslab_sync() is the metaslab's ms_allocatable. No
* other thread can be modifying this txg's alloc, freeing,
* freed, or space_map_phys_t. We drop ms_lock whenever we
* could call into the DMU, because the DMU can call down to
* us (e.g. via zio_free()) at any time.
*
* The spa_vdev_remove_thread() can be reading metaslab state
* concurrently, and it is locked out by the ms_sync_lock.
* Note that the ms_lock is insufficient for this, because it
* is dropped by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
/*
* Generate a log space map if one doesn't exist already.
*/
spa_generate_syncing_log_sm(spa, tx);
if (msp->ms_sm == NULL) {
uint64_t new_object = space_map_alloc(mos,
spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
zfs_metaslab_sm_blksz_with_log :
zfs_metaslab_sm_blksz_no_log, tx);
VERIFY3U(new_object, !=, 0);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &new_object, tx);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
ASSERT0(metaslab_allocated_space(msp));
}
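/*
 * If the log space map feature is active and this metaslab has
 * never been flushed, register it: record the current syncing txg
 * as its unflushed txg and add it to the pool's by-flushed-txg
 * tree and log space map accounting.
 */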
if (metaslab_unflushed_txg(msp) == 0 &&
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
ASSERT(spa_syncing_log_sm(spa) != NULL);
metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
spa_log_sm_increment_current_mscount(spa);
spa_log_summary_add_flushed_metaslab(spa);
ASSERT(msp->ms_sm != NULL);
mutex_enter(&spa->spa_flushed_ms_lock);
avl_add(&spa->spa_metaslabs_by_flushed, msp);
mutex_exit(&spa->spa_flushed_ms_lock);
ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
}
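/*
 * If this txg has checkpointed frees and the vdev doesn't yet have
 * a checkpoint space map, create one now so those frees can be
 * recorded below.
 */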
if (!range_tree_is_empty(msp->ms_checkpointing) &&
vd->vdev_checkpoint_sm == NULL) {
ASSERT(spa_has_checkpoint(spa));
uint64_t new_object = space_map_alloc(mos,
zfs_vdev_standard_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* We save the space map object as an entry in vdev_top_zap
* so it can be retrieved when the pool is reopened after an
* export or through zdb.
*/
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (new_object), 1, &new_object, tx));
}
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
metaslab_should_condense(msp))
metaslab_condense(msp, tx);
/*
* We'll be going to disk to sync our space accounting, thus we
* drop the ms_lock during that time so allocations coming from
* open-context (ZIL) for future TXGs do not block.
*/
mutex_exit(&msp->ms_lock);
space_map_t *log_sm = spa_syncing_log_sm(spa);
if (log_sm != NULL) {
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(log_sm, alloctree, SM_ALLOC,
vd->vdev_id, tx);
space_map_write(log_sm, msp->ms_freeing, SM_FREE,
vd->vdev_id, tx);
mutex_enter(&msp->ms_lock);
ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
metaslab_unflushed_changes_memused(msp));
spa->spa_unflushed_stats.sus_memused -=
metaslab_unflushed_changes_memused(msp);
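/*
 * Fold this txg's changes into the unflushed trees: allocations
 * cancel any overlapping pending unflushed frees (and vice versa),
 * and whatever does not cancel out is recorded as unflushed allocs
 * or frees respectively.
 */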
range_tree_remove_xor_add(alloctree,
msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
range_tree_remove_xor_add(msp->ms_freeing,
msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
spa->spa_unflushed_stats.sus_memused +=
metaslab_unflushed_changes_memused(msp);
} else {
ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
SM_NO_VDEVID, tx);
space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
}
msp->ms_allocated_space += range_tree_space(alloctree);
ASSERT3U(msp->ms_allocated_space, >=,
range_tree_space(msp->ms_freeing));
msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
if (!range_tree_is_empty(msp->ms_checkpointing)) {
ASSERT(spa_has_checkpoint(spa));
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since we are doing writes to disk and the ms_checkpointing
* tree won't be changing during that time, we drop the
* ms_lock while writing to the checkpoint space map, for the
* same reason mentioned above.
*/
mutex_exit(&msp->ms_lock);
space_map_write(vd->vdev_checkpoint_sm,
msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
mutex_enter(&msp->ms_lock);
spa->spa_checkpoint_info.sci_dspace +=
range_tree_space(msp->ms_checkpointing);
vd->vdev_stat.vs_checkpoint_space +=
range_tree_space(msp->ms_checkpointing);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
-space_map_allocated(vd->vdev_checkpoint_sm));
range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
}
if (msp->ms_loaded) {
/*
* When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defer[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
metaslab_aux_histograms_update(msp);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeing and freed.
* We can safely do this since the freed_tree is guaranteed to be
* empty on the initial pass.
*
* Keep in mind that even if we are currently using a log spacemap
* we want current frees to end up in the ms_allocatable (but not
* get appended to the ms_sm) so their ranges can be reused as usual.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
ASSERT0(msp->ms_allocated_this_txg);
} else {
range_tree_vacate(msp->ms_freeing,
range_tree_add, msp->ms_freed);
}
msp->ms_allocated_this_txg += range_tree_space(alloctree);
range_tree_vacate(alloctree, NULL, NULL);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
& TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
mutex_exit(&msp->ms_lock);
/*
* Verify that the space map object ID has been recorded in the
* vdev_ms_array.
*/
uint64_t object;
VERIFY0(dmu_read(mos, vd->vdev_ms_array,
msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
VERIFY3U(object, ==, space_map_object(msp->ms_sm));
mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
if (!msp->ms_loaded || msp->ms_disabled != 0)
return;
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space(
msp->ms_allocating[(txg + t) & TXG_MASK]));
}
if (msp->ms_allocator != -1)
metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
if (msp->ms_new) {
/* this is a new metaslab, add its capacity to the vdev */
metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
/* there should be no allocations nor frees at this point */
VERIFY0(msp->ms_allocated_this_txg);
VERIFY0(range_tree_space(msp->ms_freed));
}
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_checkpointing));
defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
defer_allowed = B_FALSE;
}
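/*
 * Compute how this txg changes the vdev's space accounting:
 * alloc_delta is the net allocation for the txg, while defer_delta
 * tracks space moving into (or, when deferral is not allowed, only
 * out of) the defer tree.
 */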
defer_delta = 0;
alloc_delta = msp->ms_allocated_this_txg -
range_tree_space(msp->ms_freed);
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freed) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
defer_delta, 0);
if (spa_syncing_log_sm(spa) == NULL) {
/*
* If there's a metaslab_load() in progress and we don't have
* a log space map, it means that we probably wrote to the
* metaslab's space map. If this is the case, we need to
* make sure that we wait for the load to complete so that we
* have a consistent view at the in-core side of the metaslab.
*/
metaslab_load_wait(msp);
} else {
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
}
/*
* When auto-trimming is enabled, free ranges which are added to
* ms_allocatable are also added to ms_trim. The ms_trim tree is
* periodically consumed by the vdev_autotrim_thread() which issues
* trims for all ranges and then vacates the tree. The ms_trim tree
* can be discarded at any time with the sole consequence of recent
* frees not being trimmed.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
if (!defer_allowed) {
range_tree_walk(msp->ms_freed, range_tree_add,
msp->ms_trim);
}
} else {
range_tree_vacate(msp->ms_trim, NULL, NULL);
}
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
* just emptied out the defer_tree.
*/
range_tree_vacate(*defer_tree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
if (defer_allowed) {
range_tree_swap(&msp->ms_freed, defer_tree);
} else {
range_tree_vacate(msp->ms_freed,
msp->ms_loaded ? range_tree_add : NULL,
msp->ms_allocatable);
}
msp->ms_synced_length = space_map_length(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
metaslab_aux_histograms_update_done(msp, defer_allowed);
if (msp->ms_new) {
msp->ms_new = B_FALSE;
mutex_enter(&mg->mg_lock);
mg->mg_ms_ready++;
mutex_exit(&mg->mg_lock);
}
/*
* Re-sort metaslab within its group now that we've adjusted
* its allocatable space.
*/
metaslab_recalculate_weight_and_sort(msp);
ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeing));
ASSERT0(range_tree_space(msp->ms_freed));
ASSERT0(range_tree_space(msp->ms_checkpointing));
msp->ms_allocating_total -= msp->ms_allocated_this_txg;
msp->ms_allocated_this_txg = 0;
mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_class->mc_spa;
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
/*
* Preload the next potential metaslabs but only on active
* metaslab groups. We can get into a state where the metaslab
* is no longer active since we dirty metaslabs as we remove a
* device, thus potentially making the metaslab group eligible
* for preloading.
*/
if (mg->mg_activation_count > 0) {
metaslab_group_preload(mg);
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
}
/*
* When writing a ditto block (i.e. more than one DVA for a given BP) on
* the same vdev as an existing DVA of this BP, then try to allocate it
* on a different metaslab than existing DVAs (i.e. a unique metaslab).
*/
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
uint64_t dva_ms_id;
if (DVA_GET_ASIZE(dva) == 0)
return (B_TRUE);
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (B_TRUE);
dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
return (msp->ms_id != dva_ms_id);
}
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
int allocator)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
* entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
panic("too many entries in allocation list");
#endif
METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
mat->mat_allocator = allocator;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
int allocator)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}
static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
metaslab_class_allocator_t *mca =
&mg->mg_class->mc_allocator[allocator];
uint64_t max = mg->mg_max_alloc_queue_depth;
uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
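/*
 * Bump this allocator's current max queue depth by one (up to the
 * group limit) with a CAS loop, mirroring the increase in the
 * class-wide max allocation slots.
 */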
while (cur < max) {
if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
cur, cur + 1) == cur) {
atomic_inc_64(&mca->mca_alloc_max_slots);
return;
}
cur = mga->mga_cur_max_alloc_queue_depth;
}
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
int allocator, boolean_t io_complete)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
(flags & METASLAB_DONT_THROTTLE))
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
int allocator)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
}
#endif
}
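/*
 * Allocate 'size' bytes from the metaslab using the class's allocator
 * ops. On success the range is removed from ms_allocatable, excluded
 * from ms_trim, and tracked in this txg's ms_allocating tree; returns
 * the start offset, or -1ULL if no suitable segment was found.
 */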
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
range_tree_t *rt = msp->ms_allocatable;
metaslab_class_t *mc = msp->ms_group->mg_class;
ASSERT(MUTEX_HELD(&msp->ms_lock));
VERIFY(!msp->ms_condensing);
VERIFY0(msp->ms_disabled);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
range_tree_clear(msp->ms_trim, start, size);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
msp->ms_allocating_total += size;
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_largest_allocatable(msp);
return (start);
}
/*
* Find the metaslab with the highest weight that is less than what we've
* already tried. In the common case, this means that we will examine each
* metaslab at most once. Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock. If a metaslab is
* activated by another thread, and we fail to allocate from the metaslab we
* have selected, we may not try the newly-activated metaslab, and instead
* activate another metaslab. This is not optimal, but generally does not cause
* any problems (a possible exception being if every metaslab is completely full
* except for the newly-activated metaslab which we fail to examine).
*/
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
boolean_t *was_active)
{
avl_index_t idx;
avl_tree_t *t = &mg->mg_metaslab_tree;
metaslab_t *msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
int tries = 0;
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
int i;
if (!try_hard && tries > zfs_metaslab_find_max_tries) {
METASLABSTAT_BUMP(metaslabstat_too_many_tries);
return (NULL);
}
tries++;
if (!metaslab_should_allocate(msp, asize, try_hard)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
continue;
}
/*
* If the selected metaslab is condensing or disabled,
* skip it.
*/
if (msp->ms_condensing || msp->ms_disabled > 0)
continue;
*was_active = msp->ms_allocator != -1;
/*
* If we're activating as primary, this is our first allocation
* from this disk, so we don't need to check how close we are.
* If the metaslab under consideration was already active,
* we're getting desperate enough to steal another allocator's
* metaslab, so we still don't care about distances.
*/
if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
break;
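/*
 * Otherwise, when uniqueness is requested, skip metaslabs that
 * already hold one of this BP's DVAs and keep searching.
 */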
for (i = 0; i < d; i++) {
if (want_unique &&
!metaslab_is_unique(msp, &dva[i]))
break; /* try another metaslab */
}
if (i == d)
break;
}
if (msp != NULL) {
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
search->ms_allocator = msp->ms_allocator;
search->ms_primary = msp->ms_primary;
}
return (msp);
}
static void
metaslab_active_mask_verify(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
return;
if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
VERIFY3S(msp->ms_allocator, !=, -1);
VERIFY(!msp->ms_primary);
return;
}
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
VERIFY3S(msp->ms_allocator, ==, -1);
return;
}
}
-/* ARGSUSED */
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
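/*
 * Pick an activation weight based on how many DVAs of this BP
 * already live on this vdev: the first allocation uses the primary
 * metaslab, the second the secondary, and any further ones fall
 * back to the claim weight.
 */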
for (int i = 0; i < d; i++) {
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_CLAIM;
break;
}
}
/*
* If we don't have enough metaslabs active to fill the entire array, we
* just use the 0th slot.
*/
if (mg->mg_ms_ready < mg->mg_allocators * 3)
allocator = 0;
metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
/*
* At the end of the metaslab tree are the already-active metaslabs,
* first the primaries, then the secondaries. When we resume searching
* through the tree, we need to consider ms_allocator and ms_primary so
* we start in the location right after where we left off, and don't
* accidentally loop forever considering the same metaslabs.
*/
search->ms_allocator = -1;
search->ms_primary = B_TRUE;
for (;;) {
boolean_t was_active = B_FALSE;
mutex_enter(&mg->mg_lock);
if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
mga->mga_primary != NULL) {
msp = mga->mga_primary;
/*
* Even though we don't hold the ms_lock for the
* primary metaslab, those fields should not
* change while we hold the mg_lock. Thus it is
* safe to make assertions on them.
*/
ASSERT(msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
mga->mga_secondary != NULL) {
msp = mga->mga_secondary;
/*
* See comment above about the similar assertions
* for the primary metaslab.
*/
ASSERT(!msp->ms_primary);
ASSERT3S(msp->ms_allocator, ==, allocator);
ASSERT(msp->ms_loaded);
was_active = B_TRUE;
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
} else {
msp = find_valid_metaslab(mg, activation_weight, dva, d,
want_unique, asize, allocator, try_hard, zal,
search, &was_active);
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
mutex_enter(&msp->ms_lock);
metaslab_active_mask_verify(msp);
/*
* This code is disabled because of issues with
* tracepoints in non-gpl kernel modules.
*/
#if 0
DTRACE_PROBE3(ms__activation__attempt,
metaslab_t *, msp, uint64_t, activation_weight,
boolean_t, was_active);
#endif
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
* active status first to see if we need to go back and
* select (and set_selected_txg) a new metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
ASSERT3S(msp->ms_allocator, ==, -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If the metaslab was activated for another allocator
* while we were waiting in the ms_lock above, or it's
* a primary and we're seeking a secondary (or vice versa),
* we go back and select a new metaslab.
*/
if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
(msp->ms_allocator != -1) &&
(msp->ms_allocator != allocator || ((activation_weight ==
METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
ASSERT(msp->ms_loaded);
ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
msp->ms_allocator != -1);
mutex_exit(&msp->ms_lock);
continue;
}
/*
* This metaslab was used for claiming regions allocated
* by the ZIL during pool import. Once these regions are
* claimed we don't need to keep the CLAIM bit set
* anymore. Passivate this metaslab to zero its activation
* mask.
*/
if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
activation_weight != METASLAB_WEIGHT_CLAIM) {
ASSERT(msp->ms_loaded);
ASSERT3S(msp->ms_allocator, ==, -1);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_WEIGHT_CLAIM);
mutex_exit(&msp->ms_lock);
continue;
}
metaslab_set_selected_txg(msp, txg);
int activation_error =
metaslab_activate(msp, allocator, activation_weight);
metaslab_active_mask_verify(msp);
/*
* If the metaslab was activated by another thread for
* another allocator or activation_weight (EBUSY), or it
* failed because another metaslab was assigned as primary
* for this allocator (EEXIST) we continue using this
* metaslab for our allocation, rather than going on to a
* worse metaslab (we waited for that metaslab to be loaded
* after all).
*
* If the activation failed due to an I/O error or ENOSPC we
* skip to the next metaslab.
*/
boolean_t activated;
if (activation_error == 0) {
activated = B_TRUE;
} else if (activation_error == EBUSY ||
activation_error == EEXIST) {
activated = B_FALSE;
} else {
mutex_exit(&msp->ms_lock);
continue;
}
ASSERT(msp->ms_loaded);
/*
* Now that we have the lock, recheck to see if we should
* continue to use this metaslab for this allocation. The
* metaslab is now loaded, so metaslab_should_allocate()
* can accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize, try_hard)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL, allocator);
goto next;
}
/*
* If this metaslab is currently condensing then pick again
* as we can't manipulate this metaslab until it's committed
* to disk. If this metaslab is being initialized, we shouldn't
* allocate from it since the allocated region might be
* overwritten after allocation.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
} else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_DISABLED, allocator);
if (activated) {
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
}
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
if (activated)
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
* This code is disabled because of issues with
* tracepoints in non-GPL kernel modules.
*/
#if 0
DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
uint64_t, asize);
#endif
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
uint64_t weight;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
weight = metaslab_largest_allocatable(msp);
WEIGHT_SET_SPACEBASED(weight);
} else {
weight = metaslab_weight_from_range_tree(msp);
}
if (activated) {
metaslab_passivate(msp, weight);
} else {
/*
* For the case where we use the metaslab that is
* active for another allocator we want to make
* sure that we retain the activation mask.
*
* Note that we could attempt to use something like
* metaslab_recalculate_weight_and_sort() that
* retains the activation mask here. That function,
* however, uses metaslab_weight() to set the weight,
* which is not as accurate as the calculations
* above.
*/
weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
metaslab_group_sort(mg, msp, weight);
}
metaslab_active_mask_verify(msp);
/*
* We have just failed an allocation attempt; check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
int allocator, boolean_t try_hard)
{
uint64_t offset;
ASSERT(mg->mg_initialized);
offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
dva, d, allocator, try_hard);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE, allocator);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* Allocate a block for the specified i/o.
*/
int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal, int allocator)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
metaslab_group_t *mg, *fast_mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
* This will result in more split blocks when using device removal,
* and a large number of split blocks coupled with ztest-induced
* damage can result in extremely long reconstruction times. This
* will also test spilling from special to normal.
*/
if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
allocator);
return (SET_ERROR(ENOSPC));
}
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mca_rotor or mca_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try to spread them out on
* that vdev as much as possible. If it turns out to not be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
* longer exists or its mg has been closed (e.g. by
* device removal). Consult the rotor when
* all else fails.
*/
if (vd != NULL && vd->vdev_mg != NULL) {
mg = vdev_get_mg(vd, mc);
if (flags & METASLAB_HINTBP_AVOID &&
mg->mg_next != NULL)
mg = mg->mg_next;
} else {
mg = mca->mca_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
} else if (flags & METASLAB_FASTWRITE) {
mg = fast_mg = mca->mca_rotor;
do {
if (fast_mg->mg_vd->vdev_pending_fastwrite <
mg->mg_vd->vdev_pending_fastwrite)
mg = fast_mg;
} while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
} else {
ASSERT(mca->mca_rotor != NULL);
mg = mca->mca_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mca->mca_rotor;
rotor = mg;
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
psize, allocator, d);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE, allocator);
goto next;
}
ASSERT(mg->mg_initialized);
/*
* Avoid writing single-copy data to a failing,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if ((vd->vdev_stat.vs_write_errors > 0 ||
vd->vdev_state < VDEV_STATE_HEALTHY) &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR, allocator);
goto next;
}
ASSERT(mg->mg_class == mc);
uint64_t asize = vdev_psize_to_asize(vd, psize);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
/*
* If we don't need to try hard, then require that the
* block be on a different metaslab from any other DVAs
* in this BP (unique=true). If we are trying hard, then
* allow any metaslab to be used (unique=false).
*/
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
!try_hard, dva, d, allocator, try_hard);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
if ((flags & METASLAB_FASTWRITE) ||
atomic_add_64_nv(&mca->mca_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
if (flags & METASLAB_FASTWRITE) {
atomic_add_64(&vd->vdev_pending_fastwrite,
psize);
}
return (0);
}
next:
mca->mca_rotor = mg->mg_next;
mca->mca_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, perhaps do so now.
*/
if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
psize <= 1 << spa->spa_min_ashift)) {
METASLABSTAT_BUMP(metaslabstat_try_hard);
try_hard = B_TRUE;
goto top;
}
bzero(&dva[d], sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
return (SET_ERROR(ENOSPC));
}
void
metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
boolean_t checkpoint)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
ASSERT(vdev_is_concrete(vd));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
metaslab_check_free_impl(vd, offset, asize);
mutex_enter(&msp->ms_lock);
if (range_tree_is_empty(msp->ms_freeing) &&
range_tree_is_empty(msp->ms_checkpointing)) {
vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
}
if (checkpoint) {
ASSERT(spa_has_checkpoint(spa));
range_tree_add(msp->ms_checkpointing, offset, asize);
} else {
range_tree_add(msp->ms_freeing, offset, asize);
}
mutex_exit(&msp->ms_lock);
}
-/* ARGSUSED */
void
metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
+ (void) inner_offset;
boolean_t *checkpoint = arg;
ASSERT3P(checkpoint, !=, NULL);
if (vd->vdev_ops->vdev_op_remap != NULL)
vdev_indirect_mark_obsolete(vd, offset, size);
else
metaslab_free_impl(vd, offset, size, *checkpoint);
}
static void
metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
boolean_t checkpoint)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
return;
if (spa->spa_vdev_removal != NULL &&
spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
vdev_is_concrete(vd)) {
/*
* Note: we check if the vdev is concrete because when
* we complete the removal, we first change the vdev to be
* an indirect vdev (in open context), and then (in syncing
* context) clear spa_vdev_removal.
*/
free_from_removing_vdev(vd, offset, size);
} else if (vd->vdev_ops->vdev_op_remap != NULL) {
vdev_indirect_mark_obsolete(vd, offset, size);
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
} else {
metaslab_free_concrete(vd, offset, size, checkpoint);
}
}
typedef struct remap_blkptr_cb_arg {
blkptr_t *rbca_bp;
spa_remap_cb_t rbca_cb;
vdev_t *rbca_remap_vd;
uint64_t rbca_remap_offset;
void *rbca_cb_arg;
} remap_blkptr_cb_arg_t;
static void
remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
remap_blkptr_cb_arg_t *rbca = arg;
blkptr_t *bp = rbca->rbca_bp;
/* We cannot remap split blocks. */
if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
return;
ASSERT0(inner_offset);
if (rbca->rbca_cb != NULL) {
/*
* At this point we know that we are not handling split
* blocks and we invoke the callback on the previous
* vdev which must be indirect.
*/
ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
/* set up remap_blkptr_cb_arg for the next call */
rbca->rbca_remap_vd = vd;
rbca->rbca_remap_offset = offset;
}
/*
* The phys birth time is that of dva[0]. This ensures that we know
* when each dva was written, so that resilver can determine which
* blocks need to be scrubbed (i.e. those written during the time
* the vdev was offline). It also ensures that the key used in
* the ARC hash table is unique (i.e. dva[0] + phys_birth). If
* we didn't change the phys_birth, a lookup in the ARC for a
* remapped BP could find the data that was previously stored at
* this vdev + offset.
*/
vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], offset);
}
/*
* If the block pointer contains any indirect DVAs, modify them to refer to
* concrete DVAs. Note that this will sometimes not be possible, leaving
* the indirect DVA in place. This happens if the indirect DVA spans multiple
* segments in the mapping (i.e. it is a "split block").
*
* If the BP was remapped, calls the callback on the original dva (note the
* callback can be called multiple times if the original indirect DVA refers
* to another indirect DVA, etc).
*
* Returns TRUE if the BP was remapped.
*/
boolean_t
spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
remap_blkptr_cb_arg_t rbca;
if (!zfs_remap_blkptr_enable)
return (B_FALSE);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
return (B_FALSE);
/*
* Dedup BPs cannot be remapped, because ddt_phys_select() depends
* on DVA[0] being the same in the BP as in the DDT (dedup table).
*/
if (BP_GET_DEDUP(bp))
return (B_FALSE);
/*
* Gang blocks cannot be remapped, because
* zio_checksum_gang_verifier() depends on the DVA[0] that's in
* the BP used to read the gang block header (GBH) being the same
* as the DVA[0] that we allocated for the GBH.
*/
if (BP_IS_GANG(bp))
return (B_FALSE);
/*
* Embedded BPs have no DVAs to remap.
*/
if (BP_GET_NDVAS(bp) < 1)
return (B_FALSE);
/*
* Note: we only remap dva[0]. If we remapped other dvas, we
* would no longer know what their phys birth txg is.
*/
dva_t *dva = &bp->blk_dva[0];
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd->vdev_ops->vdev_op_remap == NULL)
return (B_FALSE);
rbca.rbca_bp = bp;
rbca.rbca_cb = callback;
rbca.rbca_remap_vd = vd;
rbca.rbca_remap_offset = offset;
rbca.rbca_cb_arg = arg;
/*
* remap_blkptr_cb() will be called in order for each level of
* indirection, until a concrete vdev is reached or a split block is
* encountered. rbca_remap_vd and rbca_remap_offset are updated within
* the callback as we go from one indirect vdev to the next (either
* concrete or indirect again) in that order.
*/
vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
/* Check if the DVA wasn't remapped because it is a split block */
if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
return (B_FALSE);
return (B_TRUE);
}
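/*
 * Illustrative sketch (not compiled): a caller that only needs the BP
 * rewritten, not the per-level callback, may pass a NULL callback and arg.
 * spa and bp are assumed to come from the caller's context.
 */
#if 0
if (spa_remap_blkptr(spa, bp, NULL, NULL)) {
/* bp->blk_dva[0] now refers to a concrete vdev. */
}
#endif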
/*
* Undo the allocation of a DVA which happened in the given transaction group.
*/
void
metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
metaslab_t *msp;
vdev_t *vd;
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
ASSERT(!vd->vdev_removing);
ASSERT(vdev_is_concrete(vd));
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total -= size;
VERIFY(!msp->ms_condensing);
VERIFY3U(offset, >=, msp->ms_start);
VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
msp->ms_size);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
range_tree_add(msp->ms_allocatable, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
* Free the block represented by the given DVA.
*/
void
metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (DVA_GET_GANG(dva)) {
size = vdev_gang_header_asize(vd);
}
metaslab_free_impl(vd, offset, size, checkpoint);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
zio_t *zio, int flags)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
uint64_t max = mca->mca_alloc_max_slots;
ASSERT(mc->mc_alloc_throttle_enabled);
if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
/*
* The potential race between _count() and _add() is covered
* by the allocator lock in most cases, or irrelevant due to
* GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
* But even if we assume some other non-existent scenario, the
* worst that can happen is that a few more I/Os get to allocation
* earlier, which is not a problem.
*
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
for (int d = 0; d < slots; d++)
zfs_refcount_add(&mca->mca_alloc_slots, zio);
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
return (B_TRUE);
}
return (B_FALSE);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
int allocator, zio_t *zio)
{
metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
ASSERT(mc->mc_alloc_throttle_enabled);
for (int d = 0; d < slots; d++)
zfs_refcount_remove(&mca->mca_alloc_slots, zio);
}
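/*
 * Illustrative sketch (not compiled): a writer reserves slots before calling
 * into the allocator and releases them when the I/O completes. mc, slots,
 * allocator, zio and flags are assumed to come from the caller; in the real
 * code path the unreserve happens from the zio completion path rather than
 * inline as shown here.
 */
#if 0
if (metaslab_class_throttle_reserve(mc, slots, allocator, zio, flags)) {
/* ... perform the allocation and issue the I/O ... */
metaslab_class_throttle_unreserve(mc, slots, allocator, zio);
} else {
/* No slots available; the I/O is throttled until slots free up. */
}
#endif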
static int
metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
uint64_t txg)
{
metaslab_t *msp;
spa_t *spa = vd->vdev_spa;
int error = 0;
if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
return (SET_ERROR(ENXIO));
ASSERT3P(vd->vdev_ms, !=, NULL);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
if (error == EBUSY) {
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
error = 0;
}
}
if (error == 0 &&
!range_tree_contains(msp->ms_allocatable, offset, size))
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
return (error);
}
VERIFY(!msp->ms_condensing);
VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
range_tree_remove(msp->ms_allocatable, offset, size);
range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
metaslab_class_t *mc = msp->ms_group->mg_class;
multilist_sublist_t *mls =
multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
if (!multilist_link_active(&msp->ms_class_txg_node)) {
msp->ms_selected_txg = txg;
multilist_sublist_insert_head(mls, msp);
}
multilist_sublist_unlock(mls);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_allocating[txg & TXG_MASK],
offset, size);
msp->ms_allocating_total += size;
}
mutex_exit(&msp->ms_lock);
return (0);
}
typedef struct metaslab_claim_cb_arg_t {
uint64_t mcca_txg;
int mcca_error;
} metaslab_claim_cb_arg_t;
-/* ARGSUSED */
static void
metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
+ (void) inner_offset;
metaslab_claim_cb_arg_t *mcca_arg = arg;
if (mcca_arg->mcca_error == 0) {
mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
size, mcca_arg->mcca_txg);
}
}
int
metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
if (vd->vdev_ops->vdev_op_remap != NULL) {
metaslab_claim_cb_arg_t arg;
/*
* Only zdb(8) can claim on indirect vdevs. This is used
* to detect leaks of mapped space (that are not accounted
* for in the obsolete counts, spacemap, or bpobj).
*/
ASSERT(!spa_writeable(vd->vdev_spa));
arg.mcca_error = 0;
arg.mcca_txg = txg;
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_claim_impl_cb, &arg);
if (arg.mcca_error == 0) {
arg.mcca_error = metaslab_claim_concrete(vd,
offset, size, txg);
}
return (arg.mcca_error);
} else {
return (metaslab_claim_concrete(vd, offset, size, txg));
}
}
/*
* Intent log support: upon opening the pool after a crash, notify the SPA
* of blocks that the intent log has allocated for immediate write, but
* which are still considered free by the SPA because the last transaction
* group didn't commit yet.
*/
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
vdev_t *vd;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
return (SET_ERROR(ENXIO));
}
ASSERT(DVA_IS_VALID(dva));
if (DVA_GET_GANG(dva))
size = vdev_gang_header_asize(vd);
return (metaslab_claim_impl(vd, offset, size, txg));
}
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio, int allocator)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
int error = 0;
ASSERT(bp->blk_birth == 0);
ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_allocator[allocator].mca_rotor == NULL) {
/* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal, allocator);
if (error != 0) {
for (d--; d >= 0; d--) {
metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags,
allocator, B_FALSE);
bzero(&dva[d], sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
/*
* If we have a checkpoint for the pool we need to make sure that
* the blocks that we free that are part of the checkpoint won't be
* reused until the checkpoint is discarded or we revert to it.
*
* The checkpoint flag is passed down the metaslab_free code path
* and is set whenever we want to add a block to the checkpoint's
* accounting. That is, we "checkpoint" blocks that existed at the
* time the checkpoint was created and are therefore referenced by
* the checkpointed uberblock.
*
* Note that we don't checkpoint any blocks if the current
* syncing txg <= spa_checkpoint_txg. We want these frees to sync
* normally as they will be referenced by the checkpointed uberblock.
*/
boolean_t checkpoint = B_FALSE;
if (bp->blk_birth <= spa->spa_checkpoint_txg &&
spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
/*
* At this point, if the block is part of the checkpoint
* there is no way it was created in the current txg.
*/
ASSERT(!now);
ASSERT3U(spa_syncing_txg(spa), ==, txg);
checkpoint = B_TRUE;
}
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
if (now) {
metaslab_unalloc_dva(spa, &dva[d], txg);
} else {
ASSERT3U(txg, ==, spa_syncing_txg(spa));
metaslab_free_dva(spa, &dva[d], checkpoint);
}
}
spa_config_exit(spa, SCL_FREE, FTAG);
}
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++) {
error = metaslab_claim_dva(spa, &dva[d], txg);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
atomic_add_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
-/* ARGSUSED */
static void
metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
+ (void) inner, (void) arg;
+
if (vd->vdev_ops == &vdev_indirect_ops)
return;
metaslab_check_free_impl(vd, offset, size);
}
static void
metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
{
metaslab_t *msp;
spa_t *spa __maybe_unused = vd->vdev_spa;
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
if (vd->vdev_ops->vdev_op_remap != NULL) {
vd->vdev_ops->vdev_op_remap(vd, offset, size,
metaslab_check_free_impl_cb, NULL);
return;
}
ASSERT(vdev_is_concrete(vd));
ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
mutex_enter(&msp->ms_lock);
if (msp->ms_loaded) {
range_tree_verify_not_present(msp->ms_allocatable,
offset, size);
}
/*
* Check all segments that currently exist in the freeing pipeline.
*
* It would intuitively make sense to also check the current allocating
* tree since metaslab_unalloc_dva() exists for extents that are
* allocated and freed in the same sync pass within the same txg.
* Unfortunately there are places (e.g. the ZIL) where we allocate a
* segment but then we free part of it within the same txg
* [see zil_sync()]. Thus, we don't call range_tree_verify_not_present()
* on the current allocating tree.
*/
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
for (int j = 0; j < TXG_DEFER_SIZE; j++)
range_tree_verify_not_present(msp->ms_defer[j], offset, size);
range_tree_verify_not_present(msp->ms_trim, offset, size);
mutex_exit(&msp->ms_lock);
}
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (DVA_GET_GANG(&bp->blk_dva[i]))
size = vdev_gang_header_asize(vd);
ASSERT3P(vd, !=, NULL);
metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
static void
metaslab_group_disable_wait(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
while (mg->mg_disabled_updating) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
}
static void
metaslab_group_disabled_increment(metaslab_group_t *mg)
{
ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
ASSERT(mg->mg_disabled_updating);
while (mg->mg_ms_disabled >= max_disabled_ms) {
cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
}
mg->mg_ms_disabled++;
ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
}
/*
* Mark the metaslab as disabled to prevent any allocations on this metaslab.
* We must also track how many metaslabs are currently disabled within a
* metaslab group and limit them to prevent allocation failures from
* occurring because all metaslabs are disabled.
*/
void
metaslab_disable(metaslab_t *msp)
{
ASSERT(!MUTEX_HELD(&msp->ms_lock));
metaslab_group_t *mg = msp->ms_group;
mutex_enter(&mg->mg_ms_disabled_lock);
/*
* To keep an accurate count of how many threads have disabled
* a specific metaslab group, we only allow one thread to mark
* the metaslab group at a time. This ensures that the value of
* ms_disabled will be accurate when we decide to mark a metaslab
* group as disabled. To do this we force all other threads
* to wait until the metaslab group's mg_disabled_updating flag is
* no longer set.
*/
metaslab_group_disable_wait(mg);
mg->mg_disabled_updating = B_TRUE;
if (msp->ms_disabled == 0) {
metaslab_group_disabled_increment(mg);
}
mutex_enter(&msp->ms_lock);
msp->ms_disabled++;
mutex_exit(&msp->ms_lock);
mg->mg_disabled_updating = B_FALSE;
cv_broadcast(&mg->mg_ms_disabled_cv);
mutex_exit(&mg->mg_ms_disabled_lock);
}
void
metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
{
metaslab_group_t *mg = msp->ms_group;
spa_t *spa = mg->mg_vd->vdev_spa;
/*
* Wait for the outstanding IO to be synced to prevent newly
* allocated blocks from being overwritten. This is used by
* initialize and TRIM, which are modifying unallocated space.
*/
if (sync)
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&mg->mg_ms_disabled_lock);
mutex_enter(&msp->ms_lock);
if (--msp->ms_disabled == 0) {
mg->mg_ms_disabled--;
cv_broadcast(&mg->mg_ms_disabled_cv);
if (unload)
metaslab_unload(msp);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&mg->mg_ms_disabled_lock);
}
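/*
 * Illustrative sketch (not compiled): callers such as TRIM and initialize
 * bracket their work on unallocated space with a disable/enable pair. msp
 * is assumed to be a metaslab the caller already holds a reference on.
 */
#if 0
metaslab_disable(msp);
/* ... operate on space that is not currently allocated from msp ... */
metaslab_enable(msp, B_TRUE, B_FALSE);	/* wait for sync, keep loaded */
#endif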
static void
metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
{
vdev_t *vd = ms->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
metaslab_unflushed_phys_t entry = {
.msp_unflushed_txg = metaslab_unflushed_txg(ms),
};
uint64_t entry_size = sizeof (entry);
uint64_t entry_offset = ms->ms_id * entry_size;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object);
if (err == ENOENT) {
object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
VERIFY0(zap_add(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
&object, tx));
} else {
VERIFY0(err);
}
dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
&entry, tx);
}
void
metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
{
spa_t *spa = ms->ms_group->mg_vd->vdev_spa;
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
ms->ms_unflushed_txg = txg;
metaslab_update_ondisk_flush_data(ms, tx);
}
uint64_t
metaslab_unflushed_txg(metaslab_t *ms)
{
return (ms->ms_unflushed_txg);
}
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW,
"Allocation granularity (a.k.a. stripe size)");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
"Load all metaslabs when pool is first opened");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
"Prevent metaslabs from being unloaded");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
"Preload potential metaslabs during reassessment");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW,
"Delay in txgs after metaslab was last used before unloading");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW,
"Delay in milliseconds after metaslab was last used before unloading");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW,
"Percentage of metaslab group size that should be free to make it "
"eligible for allocation");
ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW,
"Percentage of metaslab group size that should be considered eligible "
"for allocations unless all metaslab groups within the metaslab class "
"have also crossed this threshold");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT,
ZMOD_RW, "Fragmentation for metaslab to allow allocation");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, ZMOD_RW,
"Use the fragmentation metric to prefer less fragmented metaslabs");
/* END CSTYLED */
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
"Prefer metaslabs with lower LBAs");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
"Enable metaslab group biasing");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
ZMOD_RW, "Enable segment-based metaslab selection");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
"Segment-based metaslab selection maximum buckets before switching");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
"Blocks larger than this size are forced to be gang blocks");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW,
"Max distance (bytes) to search forward before using size tree");
ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
"When looking in size tree, use largest segment instead of exact fit");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
"Percentage of memory that can be used to store metaslab range trees");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
ZMOD_RW, "Try hard to allocate before ganging");
ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW,
"Normally only consider this many of the best metaslabs in each vdev");
diff --git a/sys/contrib/openzfs/module/zfs/range_tree.c b/sys/contrib/openzfs/module/zfs/range_tree.c
index 595918e5a742..67910f9ffde0 100644
--- a/sys/contrib/openzfs/module/zfs/range_tree.c
+++ b/sys/contrib/openzfs/module/zfs/range_tree.c
@@ -1,923 +1,921 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2019 by Delphix. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>
/*
* Range trees are tree-based data structures that can be used to
* track free space or generally any space allocation information.
* A range tree keeps track of individual segments and automatically
* provides facilities such as adjacent extent merging and extent
* splitting in response to range add/remove requests.
*
* A range tree starts out completely empty, with no segments in it.
* Adding an allocation via range_tree_add to the range tree can:
* 1) create a new extent
* 2) extend an adjacent extent
* 3) merge two adjacent extents
* Conversely, removing an allocation via range_tree_remove can:
* 1) completely remove an extent
* 2) shorten an extent (if the allocation was near one of its ends)
* 3) split an extent into two extents, in effect punching a hole
*
* A range tree is also capable of 'bridging' gaps when adding
* allocations. This is useful for cases when close proximity of
* allocations is an important detail that needs to be represented
* in the range tree. See range_tree_set_gap(). The default behavior
* is not to bridge gaps (i.e. the maximum allowed gap size is 0).
*
* In order to traverse a range tree, use either the range_tree_walk()
* or range_tree_vacate() functions.
*
* To obtain more accurate information on individual segment
* operations that the range tree performs "under the hood", you can
* specify a set of callbacks by passing a range_tree_ops_t structure
* to the range_tree_create function. Any callbacks that are non-NULL
* are then called at the appropriate times.
*
* The range tree code also supports a special variant of range trees
* that can bridge small gaps between segments. This kind of tree is used
* by the dsl scanning code to group I/Os into mostly sequential chunks to
* optimize disk performance. The code here attempts to do this with as
* little memory and computational overhead as possible. One limitation of
* this implementation is that range trees with gaps can only
* support removing complete segments.
*/
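/*
 * Illustrative sketch (not compiled): how a consumer might exercise the
 * basic API described above. A RANGE_SEG64 tree with no ops and no gap
 * bridging is assumed; the offsets and sizes are arbitrary.
 */
#if 0
static void
range_tree_example(void)
{
range_tree_t *rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
/* Two adjacent adds merge into a single segment [0x1000, 0x3000). */
range_tree_add(rt, 0x1000, 0x1000);
range_tree_add(rt, 0x2000, 0x1000);
ASSERT3U(range_tree_space(rt), ==, 0x2000);
/* Removing from the middle punches a hole, splitting the segment. */
range_tree_remove(rt, 0x1800, 0x400);
ASSERT(!range_tree_contains(rt, 0x1800, 0x400));
ASSERT3U(range_tree_space(rt), ==, 0x1c00);
/* A range tree must be emptied before it is destroyed. */
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
}
#endif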
static inline void
rs_copy(range_seg_t *src, range_seg_t *dest, range_tree_t *rt)
{
ASSERT3U(rt->rt_type, <=, RANGE_SEG_NUM_TYPES);
size_t size = 0;
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
break;
case RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
break;
default:
VERIFY(0);
}
bcopy(src, dest, size);
}
void
range_tree_stat_verify(range_tree_t *rt)
{
range_seg_t *rs;
zfs_btree_index_t where;
uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
int i;
for (rs = zfs_btree_first(&rt->rt_root, &where); rs != NULL;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
hist[idx]++;
ASSERT3U(hist[idx], !=, 0);
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (hist[i] != rt->rt_histogram[i]) {
zfs_dbgmsg("i=%d, hist=%px, hist=%llu, rt_hist=%llu",
i, hist, (u_longlong_t)hist[i],
(u_longlong_t)rt->rt_histogram[i]);
}
VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
}
}
static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
rt->rt_histogram[idx]++;
ASSERT3U(rt->rt_histogram[idx], !=, 0);
}
static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
ASSERT3U(rt->rt_histogram[idx], !=, 0);
rt->rt_histogram[idx]--;
}
static int
range_tree_seg32_compare(const void *x1, const void *x2)
{
const range_seg32_t *r1 = x1;
const range_seg32_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
static int
range_tree_seg64_compare(const void *x1, const void *x2)
{
const range_seg64_t *r1 = x1;
const range_seg64_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
static int
range_tree_seg_gap_compare(const void *x1, const void *x2)
{
const range_seg_gap_t *r1 = x1;
const range_seg_gap_t *r2 = x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
range_tree_t *
range_tree_create_impl(range_tree_ops_t *ops, range_seg_type_t type, void *arg,
uint64_t start, uint64_t shift,
int (*zfs_btree_compare) (const void *, const void *),
uint64_t gap)
{
range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);
ASSERT3U(shift, <, 64);
ASSERT3U(type, <=, RANGE_SEG_NUM_TYPES);
size_t size;
int (*compare) (const void *, const void *);
switch (type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
compare = range_tree_seg32_compare;
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
compare = range_tree_seg64_compare;
break;
case RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
compare = range_tree_seg_gap_compare;
break;
default:
panic("Invalid range seg type %d", type);
}
zfs_btree_create(&rt->rt_root, compare, size);
rt->rt_ops = ops;
rt->rt_gap = gap;
rt->rt_arg = arg;
rt->rt_type = type;
rt->rt_start = start;
rt->rt_shift = shift;
rt->rt_btree_compare = zfs_btree_compare;
if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
rt->rt_ops->rtop_create(rt, rt->rt_arg);
return (rt);
}
range_tree_t *
range_tree_create(range_tree_ops_t *ops, range_seg_type_t type,
void *arg, uint64_t start, uint64_t shift)
{
return (range_tree_create_impl(ops, type, arg, start, shift, NULL, 0));
}
void
range_tree_destroy(range_tree_t *rt)
{
VERIFY0(rt->rt_space);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
rt->rt_ops->rtop_destroy(rt, rt->rt_arg);
zfs_btree_destroy(&rt->rt_root);
kmem_free(rt, sizeof (*rt));
}
void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
if (delta < 0 && delta * -1 >= rs_get_fill(rs, rt)) {
zfs_panic_recover("zfs: attempting to decrease fill to or "
"below 0; probable double remove in segment [%llx:%llx]",
(longlong_t)rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt));
}
if (rs_get_fill(rs, rt) + delta > rs_get_end(rs, rt) -
rs_get_start(rs, rt)) {
zfs_panic_recover("zfs: attempting to increase fill beyond "
"max; probable double add in segment [%llx:%llx]",
(longlong_t)rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt));
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs_set_fill(rs, rt, rs_get_fill(rs, rt) + delta);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}
static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
range_tree_t *rt = arg;
zfs_btree_index_t where;
range_seg_t *rs_before, *rs_after, *rs;
range_seg_max_t tmp, rsearch;
uint64_t end = start + size, gap = rt->rt_gap;
uint64_t bridge_size = 0;
boolean_t merge_before, merge_after;
ASSERT3U(size, !=, 0);
ASSERT3U(fill, <=, size);
ASSERT3U(start + size, >, start);
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/*
* If this is a gap-supporting range tree, it is possible that we
* are inserting into an existing segment. In this case simply
* bump the fill count and call the remove / add callbacks. If the
* new range will extend an existing segment, we remove the
* existing one, apply the new extent to it and re-insert it using
* the normal code paths.
*/
if (rs != NULL) {
if (gap == 0) {
zfs_panic_recover("zfs: adding existent segment to "
"range tree (offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size);
return;
}
uint64_t rstart = rs_get_start(rs, rt);
uint64_t rend = rs_get_end(rs, rt);
if (rstart <= start && rend >= end) {
range_tree_adjust_fill(rt, rs, fill);
return;
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
range_tree_stat_decr(rt, rs);
rt->rt_space -= rend - rstart;
fill += rs_get_fill(rs, rt);
start = MIN(start, rstart);
end = MAX(end, rend);
size = end - start;
zfs_btree_remove(&rt->rt_root, rs);
range_tree_add_impl(rt, start, size, fill);
return;
}
ASSERT3P(rs, ==, NULL);
/*
* Determine whether or not we will have to merge with our neighbors.
* If gap != 0, we might need to merge with our neighbors even if we
* aren't directly touching.
*/
zfs_btree_index_t where_before, where_after;
rs_before = zfs_btree_prev(&rt->rt_root, &where, &where_before);
rs_after = zfs_btree_next(&rt->rt_root, &where, &where_after);
merge_before = (rs_before != NULL && rs_get_end(rs_before, rt) >=
start - gap);
merge_after = (rs_after != NULL && rs_get_start(rs_after, rt) <= end +
gap);
if (merge_before && gap != 0)
bridge_size += start - rs_get_end(rs_before, rt);
if (merge_after && gap != 0)
bridge_size += rs_get_start(rs_after, rt) - end;
if (merge_before && merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
}
range_tree_stat_decr(rt, rs_before);
range_tree_stat_decr(rt, rs_after);
rs_copy(rs_after, &tmp, rt);
uint64_t before_start = rs_get_start_raw(rs_before, rt);
uint64_t before_fill = rs_get_fill(rs_before, rt);
uint64_t after_fill = rs_get_fill(rs_after, rt);
zfs_btree_remove_idx(&rt->rt_root, &where_before);
/*
* We have to re-find the node because our old reference is
* invalid as soon as we do any mutating btree operations.
*/
rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
rs_set_start_raw(rs_after, rt, before_start);
rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
rs = rs_after;
} else if (merge_before) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
range_tree_stat_decr(rt, rs_before);
uint64_t before_fill = rs_get_fill(rs_before, rt);
rs_set_end(rs_before, rt, end);
rs_set_fill(rs_before, rt, before_fill + fill);
rs = rs_before;
} else if (merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
range_tree_stat_decr(rt, rs_after);
uint64_t after_fill = rs_get_fill(rs_after, rt);
rs_set_start(rs_after, rt, start);
rs_set_fill(rs_after, rt, after_fill + fill);
rs = rs_after;
} else {
rs = &tmp;
rs_set_start(rs, rt, start);
rs_set_end(rs, rt, end);
rs_set_fill(rs, rt, fill);
zfs_btree_add_idx(&rt->rt_root, rs, &where);
}
if (gap != 0) {
ASSERT3U(rs_get_fill(rs, rt), <=, rs_get_end(rs, rt) -
rs_get_start(rs, rt));
} else {
ASSERT3U(rs_get_fill(rs, rt), ==, rs_get_end(rs, rt) -
rs_get_start(rs, rt));
}
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
range_tree_stat_incr(rt, rs);
rt->rt_space += size + bridge_size;
}
void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
range_tree_add_impl(arg, start, size, size);
}
static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
boolean_t do_fill)
{
zfs_btree_index_t where;
range_seg_t *rs;
range_seg_max_t rsearch, rs_tmp;
uint64_t end = start + size;
boolean_t left_over, right_over;
VERIFY3U(size, !=, 0);
VERIFY3U(size, <=, rt->rt_space);
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end);
rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
/* Make sure we completely overlap with someone */
if (rs == NULL) {
zfs_panic_recover("zfs: removing nonexistent segment from "
"range tree (offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size);
return;
}
/*
* Range trees with gap support must only remove complete segments
* from the tree. This allows us to maintain accurate fill accounting
* and to ensure that bridged sections are not leaked. If we need to
* remove less than the full segment, we can only adjust the fill count.
*/
if (rt->rt_gap != 0) {
if (do_fill) {
if (rs_get_fill(rs, rt) == size) {
start = rs_get_start(rs, rt);
end = rs_get_end(rs, rt);
size = end - start;
} else {
range_tree_adjust_fill(rt, rs, -size);
return;
}
} else if (rs_get_start(rs, rt) != start ||
rs_get_end(rs, rt) != end) {
zfs_panic_recover("zfs: freeing partial segment of "
"gap tree (offset=%llx size=%llx) of "
"(offset=%llx size=%llx)",
(longlong_t)start, (longlong_t)size,
(longlong_t)rs_get_start(rs, rt),
(longlong_t)rs_get_end(rs, rt) - rs_get_start(rs,
rt));
return;
}
}
VERIFY3U(rs_get_start(rs, rt), <=, start);
VERIFY3U(rs_get_end(rs, rt), >=, end);
left_over = (rs_get_start(rs, rt) != start);
right_over = (rs_get_end(rs, rt) != end);
range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
if (left_over && right_over) {
range_seg_max_t newseg;
rs_set_start(&newseg, rt, end);
rs_set_end_raw(&newseg, rt, rs_get_end_raw(rs, rt));
rs_set_fill(&newseg, rt, rs_get_end(rs, rt) - end);
range_tree_stat_incr(rt, &newseg);
// This modifies the buffer already inside the range tree
rs_set_end(rs, rt, start);
rs_copy(rs, &rs_tmp, rt);
if (zfs_btree_next(&rt->rt_root, &where, &where) != NULL)
zfs_btree_add_idx(&rt->rt_root, &newseg, &where);
else
zfs_btree_add(&rt->rt_root, &newseg);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &newseg, rt->rt_arg);
} else if (left_over) {
// This modifies the buffer already inside the range tree
rs_set_end(rs, rt, start);
rs_copy(rs, &rs_tmp, rt);
} else if (right_over) {
// This modifies the buffer already inside the range tree
rs_set_start(rs, rt, end);
rs_copy(rs, &rs_tmp, rt);
} else {
zfs_btree_remove_idx(&rt->rt_root, &where);
rs = NULL;
}
if (rs != NULL) {
/*
* The fill of the leftover segment will always be equal to
* the size, since we do not support removing partial segments
* of range trees with gaps.
*/
rs_set_fill_raw(rs, rt, rs_get_end_raw(rs, rt) -
rs_get_start_raw(rs, rt));
range_tree_stat_incr(rt, &rs_tmp);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, &rs_tmp, rt->rt_arg);
}
rt->rt_space -= size;
}
void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
range_tree_remove_impl(arg, start, size, B_FALSE);
}
void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_tree_remove_impl(rt, start, size, B_TRUE);
}
void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
uint64_t newstart, uint64_t newsize)
{
int64_t delta = newsize - (rs_get_end(rs, rt) - rs_get_start(rs, rt));
range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs_set_start(rs, rt, newstart);
rs_set_end(rs, rt, newstart + newsize);
range_tree_stat_incr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
rt->rt_space += delta;
}
static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_max_t rsearch;
uint64_t end = start + size;
VERIFY(size != 0);
rs_set_start(&rsearch, rt, start);
rs_set_end(&rsearch, rt, end);
return (zfs_btree_find(&rt->rt_root, &rsearch, NULL));
}
range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
range_seg_t *rs = range_tree_find_impl(rt, start, size);
if (rs != NULL && rs_get_start(rs, rt) <= start &&
rs_get_end(rs, rt) >= start + size) {
return (rs);
}
return (NULL);
}
void
range_tree_verify_not_present(range_tree_t *rt, uint64_t off, uint64_t size)
{
range_seg_t *rs = range_tree_find(rt, off, size);
if (rs != NULL)
panic("segment already in tree; rs=%p", (void *)rs);
}
boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
return (range_tree_find(rt, start, size) != NULL);
}
/*
* Returns the first subset of the given range which overlaps with the range
* tree. Returns true if there is a segment in the range, and false if there
* isn't.
*/
boolean_t
range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
uint64_t *ostart, uint64_t *osize)
{
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
range_seg_max_t rsearch;
rs_set_start(&rsearch, rt, start);
rs_set_end_raw(&rsearch, rt, rs_get_start_raw(&rsearch, rt) + 1);
zfs_btree_index_t where;
range_seg_t *rs = zfs_btree_find(&rt->rt_root, &rsearch, &where);
if (rs != NULL) {
*ostart = start;
*osize = MIN(size, rs_get_end(rs, rt) - start);
return (B_TRUE);
}
rs = zfs_btree_next(&rt->rt_root, &where, &where);
if (rs == NULL || rs_get_start(rs, rt) > start + size)
return (B_FALSE);
*ostart = rs_get_start(rs, rt);
*osize = MIN(start + size, rs_get_end(rs, rt)) -
rs_get_start(rs, rt);
return (B_TRUE);
}
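/*
 * Illustrative sketch of how a caller might consume range_tree_find_in():
 * walk every piece of [start, start + size) that overlaps the tree and log
 * it. The helper name is hypothetical; only range_tree_find_in() and
 * zfs_dbgmsg() are assumed from the surrounding code.
 */
static void
example_visit_overlaps(range_tree_t *rt, uint64_t start, uint64_t size)
{
        uint64_t ostart, osize;

        while (size > 0 &&
            range_tree_find_in(rt, start, size, &ostart, &osize)) {
                zfs_dbgmsg("overlap [%llu, %llu)", (u_longlong_t)ostart,
                    (u_longlong_t)(ostart + osize));
                /* Advance past the piece that was just reported. */
                size -= (ostart + osize) - start;
                start = ostart + osize;
        }
}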
/*
* Remove any portion of this range that is currently in the tree, so that
* the range is guaranteed not to be present in the tree afterwards.
*/
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_t *rs;
if (size == 0)
return;
if (rt->rt_type == RANGE_SEG64)
ASSERT3U(start + size, >, start);
while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
uint64_t free_start = MAX(rs_get_start(rs, rt), start);
uint64_t free_end = MIN(rs_get_end(rs, rt), start + size);
range_tree_remove(rt, free_start, free_end - free_start);
}
}
void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
range_tree_t *rt;
ASSERT0(range_tree_space(*rtdst));
ASSERT0(zfs_btree_numnodes(&(*rtdst)->rt_root));
rt = *rtsrc;
*rtsrc = *rtdst;
*rtdst = rt;
}
void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
rt->rt_ops->rtop_vacate(rt, rt->rt_arg);
if (func != NULL) {
range_seg_t *rs;
zfs_btree_index_t *cookie = NULL;
while ((rs = zfs_btree_destroy_nodes(&rt->rt_root, &cookie)) !=
NULL) {
func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
rs_get_start(rs, rt));
}
} else {
zfs_btree_clear(&rt->rt_root);
}
bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
rt->rt_space = 0;
}
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where);
rs != NULL; rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
func(arg, rs_get_start(rs, rt), rs_get_end(rs, rt) -
rs_get_start(rs, rt));
}
}
range_seg_t *
range_tree_first(range_tree_t *rt)
{
return (zfs_btree_first(&rt->rt_root, NULL));
}
uint64_t
range_tree_space(range_tree_t *rt)
{
return (rt->rt_space);
}
uint64_t
range_tree_numsegs(range_tree_t *rt)
{
return ((rt == NULL) ? 0 : zfs_btree_numnodes(&rt->rt_root));
}
boolean_t
range_tree_is_empty(range_tree_t *rt)
{
ASSERT(rt != NULL);
return (range_tree_space(rt) == 0);
}
-/* ARGSUSED */
void
rt_btree_create(range_tree_t *rt, void *arg)
{
zfs_btree_t *size_tree = arg;
size_t size;
switch (rt->rt_type) {
case RANGE_SEG32:
size = sizeof (range_seg32_t);
break;
case RANGE_SEG64:
size = sizeof (range_seg64_t);
break;
case RANGE_SEG_GAP:
size = sizeof (range_seg_gap_t);
break;
default:
panic("Invalid range seg type %d", rt->rt_type);
}
zfs_btree_create(size_tree, rt->rt_btree_compare, size);
}
-/* ARGSUSED */
void
rt_btree_destroy(range_tree_t *rt, void *arg)
{
+ (void) rt;
zfs_btree_t *size_tree = arg;
ASSERT0(zfs_btree_numnodes(size_tree));
zfs_btree_destroy(size_tree);
}
-/* ARGSUSED */
void
rt_btree_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
+ (void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_add(size_tree, rs);
}
-/* ARGSUSED */
void
rt_btree_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
+ (void) rt;
zfs_btree_t *size_tree = arg;
zfs_btree_remove(size_tree, rs);
}
-/* ARGSUSED */
void
rt_btree_vacate(range_tree_t *rt, void *arg)
{
zfs_btree_t *size_tree = arg;
zfs_btree_clear(size_tree);
zfs_btree_destroy(size_tree);
rt_btree_create(rt, arg);
}
range_tree_ops_t rt_btree_ops = {
.rtop_create = rt_btree_create,
.rtop_destroy = rt_btree_destroy,
.rtop_add = rt_btree_add,
.rtop_remove = rt_btree_remove,
.rtop_vacate = rt_btree_vacate
};
/*
* Remove any overlapping ranges between the given segment [start, end)
* from removefrom. Add non-overlapping leftovers to addto.
*/
void
range_tree_remove_xor_add_segment(uint64_t start, uint64_t end,
range_tree_t *removefrom, range_tree_t *addto)
{
zfs_btree_index_t where;
range_seg_max_t starting_rs;
rs_set_start(&starting_rs, removefrom, start);
rs_set_end_raw(&starting_rs, removefrom, rs_get_start_raw(&starting_rs,
removefrom) + 1);
range_seg_t *curr = zfs_btree_find(&removefrom->rt_root,
&starting_rs, &where);
if (curr == NULL)
curr = zfs_btree_next(&removefrom->rt_root, &where, &where);
range_seg_t *next;
for (; curr != NULL; curr = next) {
if (start == end)
return;
VERIFY3U(start, <, end);
/* there is no overlap */
if (end <= rs_get_start(curr, removefrom)) {
range_tree_add(addto, start, end - start);
return;
}
uint64_t overlap_start = MAX(rs_get_start(curr, removefrom),
start);
uint64_t overlap_end = MIN(rs_get_end(curr, removefrom),
end);
uint64_t overlap_size = overlap_end - overlap_start;
ASSERT3S(overlap_size, >, 0);
range_seg_max_t rs;
rs_copy(curr, &rs, removefrom);
range_tree_remove(removefrom, overlap_start, overlap_size);
if (start < overlap_start)
range_tree_add(addto, start, overlap_start - start);
start = overlap_end;
next = zfs_btree_find(&removefrom->rt_root, &rs, &where);
/*
* If we find something here, we only removed part of the
* curr segment. Either there's some left at the end
* because we've reached the end of the range we're removing,
* or there's some left at the start because we started
* partway through the range. Either way, we continue with
* the loop. If it's the former, we'll return at the start of
* the loop, and if it's the latter we'll see if there is more
* area to process.
*/
if (next != NULL) {
ASSERT(start == end || start == rs_get_end(&rs,
removefrom));
}
next = zfs_btree_next(&removefrom->rt_root, &where, &where);
}
VERIFY3P(curr, ==, NULL);
if (start != end) {
VERIFY3U(start, <, end);
range_tree_add(addto, start, end - start);
} else {
VERIFY3U(start, ==, end);
}
}
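/*
 * Worked example of the behavior above (a sketch, not taken from any real
 * pool): with removefrom = {[3, 5), [8, 12)} and the segment [0, 10),
 * the overlapping pieces [3, 5) and [8, 10) are removed from removefrom,
 * leaving {[10, 12)}, while the non-overlapping leftovers [0, 3) and
 * [5, 8) are added to addto.
 */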
/*
* For each entry in rt, if it exists in removefrom, remove it
* from removefrom. Otherwise, add it to addto.
*/
void
range_tree_remove_xor_add(range_tree_t *rt, range_tree_t *removefrom,
range_tree_t *addto)
{
zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where, &where)) {
range_tree_remove_xor_add_segment(rs_get_start(rs, rt),
rs_get_end(rs, rt), removefrom, addto);
}
}
uint64_t
range_tree_min(range_tree_t *rt)
{
range_seg_t *rs = zfs_btree_first(&rt->rt_root, NULL);
return (rs != NULL ? rs_get_start(rs, rt) : 0);
}
uint64_t
range_tree_max(range_tree_t *rt)
{
range_seg_t *rs = zfs_btree_last(&rt->rt_root, NULL);
return (rs != NULL ? rs_get_end(rs, rt) : 0);
}
uint64_t
range_tree_span(range_tree_t *rt)
{
return (range_tree_max(rt) - range_tree_min(rt));
}
diff --git a/sys/contrib/openzfs/module/zfs/sa.c b/sys/contrib/openzfs/module/zfs/sa.c
index 2604a7513ecf..b69b0c68f13c 100644
--- a/sys/contrib/openzfs/module/zfs/sa.c
+++ b/sys/contrib/openzfs/module/zfs/sa.c
@@ -1,2257 +1,2257 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sunddi.h>
#include <sys/sa_impl.h>
#include <sys/errno.h>
#include <sys/zfs_context.h>
#ifdef _KERNEL
#include <sys/zfs_znode.h>
#endif
/*
* ZFS System attributes:
*
* A generic mechanism to allow for arbitrary attributes
* to be stored in a dnode. The data will be stored in the bonus buffer of
* the dnode and if necessary a special "spill" block will be used to handle
* overflow situations. The spill block will be sized to fit the data
* from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
* spill block is stored at the end of the current bonus buffer. Any
* attributes that would be in the way of the blkptr_t will be relocated
* into the spill block.
*
* Attribute registration:
*
* A mapping between attribute "string" names and their actual attribute
* numeric values, lengths, and byteswap functions is stored persistently
* on a per-dataset basis. The names are only used
* during registration. All attributes are known by their unique attribute
* id value. If an attribute can have a variable size then the value
* 0 will be used to indicate this.
*
* Attribute Layout:
*
* Attribute layouts are a way to compactly store multiple attributes, but
* without taking the overhead associated with managing each attribute
* individually. Since you will typically have the same set of attributes
* stored in the same order a single table will be used to represent that
* layout. The ZPL for example will usually have only about 10 different
* layouts (regular files, device files, symlinks,
* regular files + scanstamp, files/dir with extended attributes, and then
* you have the possibility of all of those minus ACL, because it would
* be kicked out into the spill block)
*
* Layouts are simply an array of the attributes and their
* ordering, e.g. [0, 1, 4, 5, 2]
*
* Each distinct layout is given a unique layout number and that is what's
* stored in the header at the beginning of the SA data buffer.
*
* A layout only covers a single dbuf (bonus or spill). If a set of
* attributes is split up between the bonus buffer and a spill buffer then
* two different layouts will be used. This allows us to byteswap the
* spill without looking at the bonus buffer and keeps the on disk format of
* the bonus and spill buffer the same.
*
* Adding a single attribute will cause the entire set of attributes to
* be rewritten and could result in a new layout number being constructed
* as part of the rewrite if no such layout exists for the new set of
* attributes. The new attribute will be appended to the end of the already
* existing attributes.
*
* Both the attribute registration and attribute layout information are
* stored in normal ZAP attributes. There should be a small number of
* known layouts, and the set of attributes is assumed to typically be quite
* small.
*
* The registered attributes and layout "table" information is maintained
* in core and a special "sa_os_t" is attached to the objset_t.
*
* A special interface is provided to allow for quickly applying
* a large set of attributes at once. sa_replace_all_by_template() is
* used to set an array of attributes. This is used by the ZPL when
* creating a brand new file. The template that is passed into the function
* specifies the attribute, size for variable length attributes, location of
* data and special "data locator" function if the data isn't in a contiguous
* location.
*
* Byteswap implications:
*
* Since the SA attributes are not entirely self describing we can't do
* the normal byteswap processing. The special ZAP layout attribute and
* attribute registration attributes define the byteswap function and the
* size of the attributes, unless they are variable-sized.
* The normal ZFS byteswapping infrastructure assumes you don't need
* to read any objects in order to do the necessary byteswapping, whereas
* SA attributes can only be properly byteswapped if the dataset is opened
* and the layout/attribute ZAP attributes are available. Because of this
* the SA attributes will be byteswapped when they are first accessed by
* the SA code that will read the SA data.
*/
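/*
 * Illustrative registration sketch, assuming a consumer-defined attribute
 * table: the names, the MY_ATTR_* ids and example_attach_sa() are
 * hypothetical, while sa_attr_reg_t, SA_UINT64_ARRAY and sa_setup() come
 * from the SA interface used below. sa_setup() fills "table" with the
 * numeric attribute ids that all later lookups and updates use.
 */
typedef enum { MY_ATTR_SIZE = 0, MY_ATTR_GEN, MY_ATTR_END } my_attr_t;

static sa_attr_reg_t my_attr_reg[MY_ATTR_END] = {
        {"MYFS_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 0},
        {"MYFS_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 0},
};

static int
example_attach_sa(objset_t *os, uint64_t sa_obj, sa_attr_type_t **table)
{
        return (sa_setup(os, sa_obj, my_attr_reg, MY_ATTR_END, table));
}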
typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
uint16_t length, int length_idx, boolean_t, void *userp);
static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
sa_hdr_phys_t *hdr);
static void sa_idx_tab_rele(objset_t *os, void *arg);
static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
int buflen);
static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
uint16_t buflen, dmu_tx_t *tx);
arc_byteswap_func_t sa_bswap_table[] = {
byteswap_uint64_array,
byteswap_uint32_array,
byteswap_uint16_array,
byteswap_uint8_array,
zfs_acl_byteswap,
};
#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
#define SA_COPY_DATA(f, s, t, l) \
do { \
if (f == NULL) { \
if (l == 8) { \
*(uint64_t *)t = *(uint64_t *)s; \
} else if (l == 16) { \
*(uint64_t *)t = *(uint64_t *)s; \
*(uint64_t *)((uintptr_t)t + 8) = \
*(uint64_t *)((uintptr_t)s + 8); \
} else { \
bcopy(s, t, l); \
} \
} else { \
sa_copy_data(f, s, t, l); \
} \
} while (0)
#else
#define SA_COPY_DATA(f, s, t, l) sa_copy_data(f, s, t, l)
#endif
/*
* This table is fixed and cannot be changed. Its purpose is to
* allow the SA code to work with both old/new ZPL file systems.
* It contains the list of legacy attributes. These attributes aren't
* stored in the "attribute" registry zap objects, since older ZPL file systems
* won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
* use this static table.
*/
sa_attr_reg_t sa_legacy_attrs[] = {
{"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
{"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
{"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
{"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
{"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
{"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
{"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
{"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
{"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
{"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
{"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
{"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
{"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
{"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
{"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
{"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
};
/*
* This is only used for objects of type DMU_OT_ZNODE
*/
sa_attr_type_t sa_legacy_zpl_layout[] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
/*
* Special dummy layout used for buffers with no attributes.
*/
sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };
static int sa_legacy_attr_count = ARRAY_SIZE(sa_legacy_attrs);
static kmem_cache_t *sa_cache = NULL;
-/*ARGSUSED*/
static int
sa_cache_constructor(void *buf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
sa_handle_t *hdl = buf;
mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
-/*ARGSUSED*/
static void
sa_cache_destructor(void *buf, void *unused)
{
+ (void) unused;
sa_handle_t *hdl = buf;
mutex_destroy(&hdl->sa_lock);
}
void
sa_cache_init(void)
{
sa_cache = kmem_cache_create("sa_cache",
sizeof (sa_handle_t), 0, sa_cache_constructor,
sa_cache_destructor, NULL, NULL, NULL, 0);
}
void
sa_cache_fini(void)
{
if (sa_cache)
kmem_cache_destroy(sa_cache);
}
static int
layout_num_compare(const void *arg1, const void *arg2)
{
const sa_lot_t *node1 = (const sa_lot_t *)arg1;
const sa_lot_t *node2 = (const sa_lot_t *)arg2;
return (TREE_CMP(node1->lot_num, node2->lot_num));
}
static int
layout_hash_compare(const void *arg1, const void *arg2)
{
const sa_lot_t *node1 = (const sa_lot_t *)arg1;
const sa_lot_t *node2 = (const sa_lot_t *)arg2;
int cmp = TREE_CMP(node1->lot_hash, node2->lot_hash);
if (likely(cmp))
return (cmp);
return (TREE_CMP(node1->lot_instance, node2->lot_instance));
}
static boolean_t
sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
{
int i;
if (count != tbf->lot_attr_count)
return (1);
for (i = 0; i != count; i++) {
if (attrs[i] != tbf->lot_attrs[i])
return (1);
}
return (0);
}
#define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
static uint64_t
sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count)
{
int i;
uint64_t crc = -1ULL;
for (i = 0; i != attr_count; i++)
crc ^= SA_ATTR_HASH(attrs[i]);
return (crc);
}
static int
sa_get_spill(sa_handle_t *hdl)
{
int rc;
if (hdl->sa_spill == NULL) {
if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
&hdl->sa_spill)) == 0)
VERIFY(0 == sa_build_index(hdl, SA_SPILL));
} else {
rc = 0;
}
return (rc);
}
/*
* Main attribute lookup/update function
* returns 0 on success or non-zero on failure
*
* Operates on the bulk array; the first failure aborts further processing
*/
static int
sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
sa_data_op_t data_op, dmu_tx_t *tx)
{
sa_os_t *sa = hdl->sa_os->os_sa;
int i;
int error = 0;
sa_buf_type_t buftypes;
buftypes = 0;
ASSERT(count > 0);
for (i = 0; i != count; i++) {
ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);
bulk[i].sa_addr = NULL;
/* First check the bonus buffer */
if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
SA_GET_HDR(hdl, SA_BONUS),
bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
if (tx && !(buftypes & SA_BONUS)) {
dmu_buf_will_dirty(hdl->sa_bonus, tx);
buftypes |= SA_BONUS;
}
}
if (bulk[i].sa_addr == NULL &&
((error = sa_get_spill(hdl)) == 0)) {
if (TOC_ATTR_PRESENT(
hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
SA_ATTR_INFO(sa, hdl->sa_spill_tab,
SA_GET_HDR(hdl, SA_SPILL),
bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
if (tx && !(buftypes & SA_SPILL) &&
bulk[i].sa_size == bulk[i].sa_length) {
dmu_buf_will_dirty(hdl->sa_spill, tx);
buftypes |= SA_SPILL;
}
}
}
if (error && error != ENOENT) {
return ((error == ECKSUM) ? EIO : error);
}
switch (data_op) {
case SA_LOOKUP:
if (bulk[i].sa_addr == NULL)
return (SET_ERROR(ENOENT));
if (bulk[i].sa_data) {
SA_COPY_DATA(bulk[i].sa_data_func,
bulk[i].sa_addr, bulk[i].sa_data,
bulk[i].sa_size);
}
continue;
case SA_UPDATE:
/* existing rewrite of attr */
if (bulk[i].sa_addr &&
bulk[i].sa_size == bulk[i].sa_length) {
SA_COPY_DATA(bulk[i].sa_data_func,
bulk[i].sa_data, bulk[i].sa_addr,
bulk[i].sa_length);
continue;
} else if (bulk[i].sa_addr) { /* attr size change */
error = sa_modify_attrs(hdl, bulk[i].sa_attr,
SA_REPLACE, bulk[i].sa_data_func,
bulk[i].sa_data, bulk[i].sa_length, tx);
} else { /* adding new attribute */
error = sa_modify_attrs(hdl, bulk[i].sa_attr,
SA_ADD, bulk[i].sa_data_func,
bulk[i].sa_data, bulk[i].sa_length, tx);
}
if (error)
return (error);
break;
default:
break;
}
}
return (error);
}
static sa_lot_t *
sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
{
sa_os_t *sa = os->os_sa;
sa_lot_t *tb, *findtb;
int i;
avl_index_t loc;
ASSERT(MUTEX_HELD(&sa->sa_lock));
tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
tb->lot_attr_count = attr_count;
tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
KM_SLEEP);
bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
tb->lot_num = lot_num;
tb->lot_hash = hash;
tb->lot_instance = 0;
if (zapadd) {
char attr_name[8];
if (sa->sa_layout_attr_obj == 0) {
sa->sa_layout_attr_obj = zap_create_link(os,
DMU_OT_SA_ATTR_LAYOUTS,
sa->sa_master_obj, SA_LAYOUTS, tx);
}
(void) snprintf(attr_name, sizeof (attr_name),
"%d", (int)lot_num);
VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
attr_name, 2, attr_count, attrs, tx));
}
list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
offsetof(sa_idx_tab_t, sa_next));
for (i = 0; i != attr_count; i++) {
if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
tb->lot_var_sizes++;
}
avl_add(&sa->sa_layout_num_tree, tb);
/* verify we don't have a hash collision */
if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
for (; findtb && findtb->lot_hash == hash;
findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
if (findtb->lot_instance != tb->lot_instance)
break;
tb->lot_instance++;
}
}
avl_add(&sa->sa_layout_hash_tree, tb);
return (tb);
}
static void
sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
int count, dmu_tx_t *tx, sa_lot_t **lot)
{
sa_lot_t *tb, tbsearch;
avl_index_t loc;
sa_os_t *sa = os->os_sa;
boolean_t found = B_FALSE;
mutex_enter(&sa->sa_lock);
tbsearch.lot_hash = hash;
tbsearch.lot_instance = 0;
tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
if (tb) {
for (; tb && tb->lot_hash == hash;
tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
if (sa_layout_equal(tb, attrs, count) == 0) {
found = B_TRUE;
break;
}
}
}
if (!found) {
tb = sa_add_layout_entry(os, attrs, count,
avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
}
mutex_exit(&sa->sa_lock);
*lot = tb;
}
static int
sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
{
int error;
uint32_t blocksize;
if (size == 0) {
blocksize = SPA_MINBLOCKSIZE;
} else if (size > SPA_OLD_MAXBLOCKSIZE) {
ASSERT(0);
return (SET_ERROR(EFBIG));
} else {
blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
}
error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
ASSERT(error == 0);
return (error);
}
static void
sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
{
if (func == NULL) {
bcopy(datastart, target, buflen);
} else {
boolean_t start;
int bytes;
void *dataptr;
void *saptr = target;
uint32_t length;
start = B_TRUE;
bytes = 0;
while (bytes < buflen) {
func(&dataptr, &length, buflen, start, datastart);
bcopy(dataptr, saptr, length);
saptr = (void *)((caddr_t)saptr + length);
bytes += length;
start = B_FALSE;
}
}
}
/*
* Determine several different values pertaining to system attribute
* buffers.
*
* Return the size of the sa_hdr_phys_t header for the buffer. Each
* variable length attribute except the first contributes two bytes to
* the header size, which is then rounded up to an 8-byte boundary.
*
* The following output parameters are also computed.
*
* index - The index of the first attribute in attr_desc that will
* spill over. Only valid if will_spill is set.
*
* total - The total number of bytes of all system attributes described
* in attr_desc.
*
* will_spill - Set when spilling is necessary. It is only set when
* the buftype is SA_BONUS.
*/
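/*
 * Worked example of the header-size rule (a sketch with assumed sizes):
 * sa_hdr_phys_t is 8 bytes and already carries one length slot, so with
 * three variable-length attributes the second and third each add a 2-byte
 * slot, giving 8 + 2 + 2 = 12 bytes, which P2ROUNDUP() to an 8-byte
 * boundary turns into a 16-byte header.
 */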
static int
sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
dmu_buf_t *db, sa_buf_type_t buftype, int full_space, int *index,
int *total, boolean_t *will_spill)
{
int var_size_count = 0;
int i;
int hdrsize;
int extra_hdrsize;
if (buftype == SA_BONUS && sa->sa_force_spill) {
*total = 0;
*index = 0;
*will_spill = B_TRUE;
return (0);
}
*index = -1;
*total = 0;
*will_spill = B_FALSE;
extra_hdrsize = 0;
hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
sizeof (sa_hdr_phys_t);
ASSERT(IS_P2ALIGNED(full_space, 8));
for (i = 0; i != attr_count; i++) {
boolean_t is_var_sz, might_spill_here;
int tmp_hdrsize;
*total = P2ROUNDUP(*total, 8);
*total += attr_desc[i].sa_length;
if (*will_spill)
continue;
is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
if (is_var_sz)
var_size_count++;
/*
* Calculate what the SA header size would be if this
* attribute doesn't spill.
*/
tmp_hdrsize = hdrsize + ((is_var_sz && var_size_count > 1) ?
sizeof (uint16_t) : 0);
/*
* Check whether this attribute spans into the space
* that would be used by the spill block pointer should
* a spill block be needed.
*/
might_spill_here =
buftype == SA_BONUS && *index == -1 &&
(*total + P2ROUNDUP(tmp_hdrsize, 8)) >
(full_space - sizeof (blkptr_t));
if (is_var_sz && var_size_count > 1) {
if (buftype == SA_SPILL ||
tmp_hdrsize + *total < full_space) {
/*
* Record the extra header size in case this
* increase needs to be reversed due to
* spill-over.
*/
hdrsize = tmp_hdrsize;
if (*index != -1 || might_spill_here)
extra_hdrsize += sizeof (uint16_t);
} else {
ASSERT(buftype == SA_BONUS);
if (*index == -1)
*index = i;
*will_spill = B_TRUE;
continue;
}
}
/*
* Store index of where spill *could* occur. Then
* continue to count the remaining attribute sizes. The
* sum is used later for sizing bonus and spill buffer.
*/
if (might_spill_here)
*index = i;
if ((*total + P2ROUNDUP(hdrsize, 8)) > full_space &&
buftype == SA_BONUS)
*will_spill = B_TRUE;
}
if (*will_spill)
hdrsize -= extra_hdrsize;
hdrsize = P2ROUNDUP(hdrsize, 8);
return (hdrsize);
}
#define BUF_SPACE_NEEDED(total, header) (total + header)
/*
* Find layout that corresponds to ordering of attributes
* If not found a new layout number is created and added to
* persistent layout tables.
*/
static int
sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
dmu_tx_t *tx)
{
sa_os_t *sa = hdl->sa_os->os_sa;
uint64_t hash;
sa_buf_type_t buftype;
sa_hdr_phys_t *sahdr;
void *data_start;
sa_attr_type_t *attrs, *attrs_start;
int i, lot_count;
int dnodesize;
int spill_idx;
int hdrsize;
int spillhdrsize = 0;
int used;
dmu_object_type_t bonustype;
sa_lot_t *lot;
int len_idx;
int spill_used;
int bonuslen;
boolean_t spilling;
dmu_buf_will_dirty(hdl->sa_bonus, tx);
bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);
dmu_object_dnsize_from_db(hdl->sa_bonus, &dnodesize);
bonuslen = DN_BONUS_SIZE(dnodesize);
/* first determine bonus header size and sum of all attributes */
hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
SA_BONUS, bonuslen, &spill_idx, &used, &spilling);
if (used > SPA_OLD_MAXBLOCKSIZE)
return (SET_ERROR(EFBIG));
VERIFY0(dmu_set_bonus(hdl->sa_bonus, spilling ?
MIN(bonuslen - sizeof (blkptr_t), used + hdrsize) :
used + hdrsize, tx));
ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
bonustype == DMU_OT_SA);
/* setup and size spill buffer when needed */
if (spilling) {
boolean_t dummy;
if (hdl->sa_spill == NULL) {
VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, 0, NULL,
&hdl->sa_spill) == 0);
}
dmu_buf_will_dirty(hdl->sa_spill, tx);
spillhdrsize = sa_find_sizes(sa, &attr_desc[spill_idx],
attr_count - spill_idx, hdl->sa_spill, SA_SPILL,
hdl->sa_spill->db_size, &i, &spill_used, &dummy);
if (spill_used > SPA_OLD_MAXBLOCKSIZE)
return (SET_ERROR(EFBIG));
if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
hdl->sa_spill->db_size)
VERIFY(0 == sa_resize_spill(hdl,
BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
}
/* setup starting pointers to lay down data */
data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
buftype = SA_BONUS;
attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
KM_SLEEP);
lot_count = 0;
for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
uint16_t length;
ASSERT(IS_P2ALIGNED(data_start, 8));
attrs[i] = attr_desc[i].sa_attr;
length = SA_REGISTERED_LEN(sa, attrs[i]);
if (length == 0)
length = attr_desc[i].sa_length;
if (spilling && i == spill_idx) { /* switch to spill buffer */
VERIFY(bonustype == DMU_OT_SA);
if (buftype == SA_BONUS && !sa->sa_force_spill) {
sa_find_layout(hdl->sa_os, hash, attrs_start,
lot_count, tx, &lot);
SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
}
buftype = SA_SPILL;
hash = -1ULL;
len_idx = 0;
sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
sahdr->sa_magic = SA_MAGIC;
data_start = (void *)((uintptr_t)sahdr +
spillhdrsize);
attrs_start = &attrs[i];
lot_count = 0;
}
hash ^= SA_ATTR_HASH(attrs[i]);
attr_desc[i].sa_addr = data_start;
attr_desc[i].sa_size = length;
SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
data_start, length);
if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
sahdr->sa_lengths[len_idx++] = length;
}
data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
length), 8);
lot_count++;
}
sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);
/*
* Verify that old znodes always have layout number 0.
* Must be DMU_OT_SA for arbitrary layouts
*/
VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
(bonustype == DMU_OT_SA && lot->lot_num > 1));
if (bonustype == DMU_OT_SA) {
SA_SET_HDR(sahdr, lot->lot_num,
buftype == SA_BONUS ? hdrsize : spillhdrsize);
}
kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
if (hdl->sa_bonus_tab) {
sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
hdl->sa_bonus_tab = NULL;
}
if (!sa->sa_force_spill)
VERIFY(0 == sa_build_index(hdl, SA_BONUS));
if (hdl->sa_spill) {
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
if (!spilling) {
/*
* remove spill block that is no longer needed.
*/
dmu_buf_rele(hdl->sa_spill, NULL);
hdl->sa_spill = NULL;
hdl->sa_spill_tab = NULL;
VERIFY(0 == dmu_rm_spill(hdl->sa_os,
sa_handle_object(hdl), tx));
} else {
VERIFY(0 == sa_build_index(hdl, SA_SPILL));
}
}
return (0);
}
static void
sa_free_attr_table(sa_os_t *sa)
{
int i;
if (sa->sa_attr_table == NULL)
return;
for (i = 0; i != sa->sa_num_attrs; i++) {
if (sa->sa_attr_table[i].sa_name)
kmem_free(sa->sa_attr_table[i].sa_name,
strlen(sa->sa_attr_table[i].sa_name) + 1);
}
kmem_free(sa->sa_attr_table,
sizeof (sa_attr_table_t) * sa->sa_num_attrs);
sa->sa_attr_table = NULL;
}
static int
sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
{
sa_os_t *sa = os->os_sa;
uint64_t sa_attr_count = 0;
uint64_t sa_reg_count = 0;
int error = 0;
uint64_t attr_value;
sa_attr_table_t *tb;
zap_cursor_t zc;
zap_attribute_t za;
int registered_count = 0;
int i;
dmu_objset_type_t ostype = dmu_objset_type(os);
sa->sa_user_table =
kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);
if (sa->sa_reg_attr_obj != 0) {
error = zap_count(os, sa->sa_reg_attr_obj,
&sa_attr_count);
/*
* Make sure we retrieved a count and that it isn't zero
*/
if (error || (error == 0 && sa_attr_count == 0)) {
if (error == 0)
error = SET_ERROR(EINVAL);
goto bail;
}
sa_reg_count = sa_attr_count;
}
if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
sa_attr_count += sa_legacy_attr_count;
/* Allocate attribute numbers for attributes that aren't registered */
for (i = 0; i != count; i++) {
boolean_t found = B_FALSE;
int j;
if (ostype == DMU_OST_ZFS) {
for (j = 0; j != sa_legacy_attr_count; j++) {
if (strcmp(reg_attrs[i].sa_name,
sa_legacy_attrs[j].sa_name) == 0) {
sa->sa_user_table[i] =
sa_legacy_attrs[j].sa_attr;
found = B_TRUE;
}
}
}
if (found)
continue;
if (sa->sa_reg_attr_obj)
error = zap_lookup(os, sa->sa_reg_attr_obj,
reg_attrs[i].sa_name, 8, 1, &attr_value);
else
error = SET_ERROR(ENOENT);
switch (error) {
case ENOENT:
sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
sa_attr_count++;
break;
case 0:
sa->sa_user_table[i] = ATTR_NUM(attr_value);
break;
default:
goto bail;
}
}
sa->sa_num_attrs = sa_attr_count;
tb = sa->sa_attr_table =
kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);
/*
* Attribute table is constructed from requested attribute list,
* previously foreign registered attributes, and also the legacy
* ZPL set of attributes.
*/
if (sa->sa_reg_attr_obj) {
for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t value;
value = za.za_first_integer;
registered_count++;
tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
tb[ATTR_NUM(value)].sa_registered = B_TRUE;
if (tb[ATTR_NUM(value)].sa_name) {
continue;
}
tb[ATTR_NUM(value)].sa_name =
kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP);
(void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
strlen(za.za_name) +1);
}
zap_cursor_fini(&zc);
/*
* Make sure we processed the correct number of registered
* attributes
*/
if (registered_count != sa_reg_count) {
ASSERT(error != 0);
goto bail;
}
}
if (ostype == DMU_OST_ZFS) {
for (i = 0; i != sa_legacy_attr_count; i++) {
if (tb[i].sa_name)
continue;
tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
tb[i].sa_length = sa_legacy_attrs[i].sa_length;
tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
tb[i].sa_registered = B_FALSE;
tb[i].sa_name =
kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1,
KM_SLEEP);
(void) strlcpy(tb[i].sa_name,
sa_legacy_attrs[i].sa_name,
strlen(sa_legacy_attrs[i].sa_name) + 1);
}
}
for (i = 0; i != count; i++) {
sa_attr_type_t attr_id;
attr_id = sa->sa_user_table[i];
if (tb[attr_id].sa_name)
continue;
tb[attr_id].sa_length = reg_attrs[i].sa_length;
tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
tb[attr_id].sa_attr = attr_id;
tb[attr_id].sa_name =
kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
(void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
strlen(reg_attrs[i].sa_name) + 1);
}
sa->sa_need_attr_registration =
(sa_attr_count != registered_count);
return (0);
bail:
kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
sa->sa_user_table = NULL;
sa_free_attr_table(sa);
ASSERT(error != 0);
return (error);
}
int
sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
sa_attr_type_t **user_table)
{
zap_cursor_t zc;
zap_attribute_t za;
sa_os_t *sa;
dmu_objset_type_t ostype = dmu_objset_type(os);
sa_attr_type_t *tb;
int error;
mutex_enter(&os->os_user_ptr_lock);
if (os->os_sa) {
mutex_enter(&os->os_sa->sa_lock);
mutex_exit(&os->os_user_ptr_lock);
tb = os->os_sa->sa_user_table;
mutex_exit(&os->os_sa->sa_lock);
*user_table = tb;
return (0);
}
sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
mutex_init(&sa->sa_lock, NULL, MUTEX_NOLOCKDEP, NULL);
sa->sa_master_obj = sa_obj;
os->os_sa = sa;
mutex_enter(&sa->sa_lock);
mutex_exit(&os->os_user_ptr_lock);
avl_create(&sa->sa_layout_num_tree, layout_num_compare,
sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));
if (sa_obj) {
error = zap_lookup(os, sa_obj, SA_LAYOUTS,
8, 1, &sa->sa_layout_attr_obj);
if (error != 0 && error != ENOENT)
goto fail;
error = zap_lookup(os, sa_obj, SA_REGISTRY,
8, 1, &sa->sa_reg_attr_obj);
if (error != 0 && error != ENOENT)
goto fail;
}
if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
goto fail;
if (sa->sa_layout_attr_obj != 0) {
uint64_t layout_count;
error = zap_count(os, sa->sa_layout_attr_obj,
&layout_count);
/*
* Layout number count should be > 0
*/
if (error || (error == 0 && layout_count == 0)) {
if (error == 0)
error = SET_ERROR(EINVAL);
goto fail;
}
for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
(error = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
sa_attr_type_t *lot_attrs;
uint64_t lot_num;
lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
za.za_num_integers, KM_SLEEP);
if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
za.za_name, 2, za.za_num_integers,
lot_attrs))) != 0) {
kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
za.za_num_integers);
break;
}
VERIFY(ddi_strtoull(za.za_name, NULL, 10,
(unsigned long long *)&lot_num) == 0);
(void) sa_add_layout_entry(os, lot_attrs,
za.za_num_integers, lot_num,
sa_layout_info_hash(lot_attrs,
za.za_num_integers), B_FALSE, NULL);
kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
za.za_num_integers);
}
zap_cursor_fini(&zc);
/*
* Make sure layout count matches number of entries added
* to AVL tree
*/
if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
ASSERT(error != 0);
goto fail;
}
}
/* Add special layout number for old ZNODES */
if (ostype == DMU_OST_ZFS) {
(void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
sa_legacy_attr_count, 0,
sa_layout_info_hash(sa_legacy_zpl_layout,
sa_legacy_attr_count), B_FALSE, NULL);
(void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
0, B_FALSE, NULL);
}
*user_table = os->os_sa->sa_user_table;
mutex_exit(&sa->sa_lock);
return (0);
fail:
os->os_sa = NULL;
sa_free_attr_table(sa);
if (sa->sa_user_table)
kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
mutex_exit(&sa->sa_lock);
avl_destroy(&sa->sa_layout_hash_tree);
avl_destroy(&sa->sa_layout_num_tree);
mutex_destroy(&sa->sa_lock);
kmem_free(sa, sizeof (sa_os_t));
return ((error == ECKSUM) ? EIO : error);
}
void
sa_tear_down(objset_t *os)
{
sa_os_t *sa = os->os_sa;
sa_lot_t *layout;
void *cookie;
kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
/* Free up attr table */
sa_free_attr_table(sa);
cookie = NULL;
while ((layout =
avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
sa_idx_tab_t *tab;
while ((tab = list_head(&layout->lot_idx_tab))) {
ASSERT(zfs_refcount_count(&tab->sa_refcount));
sa_idx_tab_rele(os, tab);
}
}
cookie = NULL;
while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))) {
kmem_free(layout->lot_attrs,
sizeof (sa_attr_type_t) * layout->lot_attr_count);
kmem_free(layout, sizeof (sa_lot_t));
}
avl_destroy(&sa->sa_layout_hash_tree);
avl_destroy(&sa->sa_layout_num_tree);
mutex_destroy(&sa->sa_lock);
kmem_free(sa, sizeof (sa_os_t));
os->os_sa = NULL;
}
static void
sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
uint16_t length, int length_idx, boolean_t var_length, void *userp)
{
sa_idx_tab_t *idx_tab = userp;
if (var_length) {
ASSERT(idx_tab->sa_variable_lengths);
idx_tab->sa_variable_lengths[length_idx] = length;
}
TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
(uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
}
static void
sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
sa_iterfunc_t func, sa_lot_t *tab, void *userp)
{
void *data_start;
sa_lot_t *tb = tab;
sa_lot_t search;
avl_index_t loc;
sa_os_t *sa = os->os_sa;
int i;
uint16_t *length_start = NULL;
uint8_t length_idx = 0;
if (tab == NULL) {
search.lot_num = SA_LAYOUT_NUM(hdr, type);
tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
ASSERT(tb);
}
if (IS_SA_BONUSTYPE(type)) {
data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
offsetof(sa_hdr_phys_t, sa_lengths) +
(sizeof (uint16_t) * tb->lot_var_sizes)), 8);
length_start = hdr->sa_lengths;
} else {
data_start = hdr;
}
for (i = 0; i != tb->lot_attr_count; i++) {
int attr_length, reg_length;
uint8_t idx_len;
reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
if (reg_length) {
attr_length = reg_length;
idx_len = 0;
} else {
attr_length = length_start[length_idx];
idx_len = length_idx++;
}
func(hdr, data_start, tb->lot_attrs[i], attr_length,
idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);
data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
attr_length), 8);
}
}
-/*ARGSUSED*/
static void
sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
uint16_t length, int length_idx, boolean_t variable_length, void *userp)
{
+ (void) hdr, (void) length_idx, (void) variable_length;
sa_handle_t *hdl = userp;
sa_os_t *sa = hdl->sa_os->os_sa;
sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
}
static void
sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
{
sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
dmu_buf_impl_t *db;
int num_lengths = 1;
int i;
sa_os_t *sa __maybe_unused = hdl->sa_os->os_sa;
ASSERT(MUTEX_HELD(&sa->sa_lock));
if (sa_hdr_phys->sa_magic == SA_MAGIC)
return;
db = SA_GET_DB(hdl, buftype);
if (buftype == SA_SPILL) {
arc_release(db->db_buf, NULL);
arc_buf_thaw(db->db_buf);
}
sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);
/*
* Determine the number of variable lengths in the header.
* The standard 8-byte header has one for free; a
* 16-byte header would have 4 + 1.
*/
if (SA_HDR_SIZE(sa_hdr_phys) > 8)
num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
for (i = 0; i != num_lengths; i++)
sa_hdr_phys->sa_lengths[i] =
BSWAP_16(sa_hdr_phys->sa_lengths[i]);
sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
sa_byteswap_cb, NULL, hdl);
if (buftype == SA_SPILL)
arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
}
static int
sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
{
sa_hdr_phys_t *sa_hdr_phys;
dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
sa_os_t *sa = hdl->sa_os->os_sa;
sa_idx_tab_t *idx_tab;
sa_hdr_phys = SA_GET_HDR(hdl, buftype);
mutex_enter(&sa->sa_lock);
/* Do we need to byteswap? */
/* only check if not old znode */
if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
sa_hdr_phys->sa_magic != 0) {
if (BSWAP_32(sa_hdr_phys->sa_magic) != SA_MAGIC) {
mutex_exit(&sa->sa_lock);
zfs_dbgmsg("Buffer Header: %x != SA_MAGIC:%x "
"object=%#llx\n", sa_hdr_phys->sa_magic, SA_MAGIC,
(u_longlong_t)db->db.db_object);
return (SET_ERROR(EIO));
}
sa_byteswap(hdl, buftype);
}
idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);
if (buftype == SA_BONUS)
hdl->sa_bonus_tab = idx_tab;
else
hdl->sa_spill_tab = idx_tab;
mutex_exit(&sa->sa_lock);
return (0);
}
-/*ARGSUSED*/
static void
sa_evict_sync(void *dbu)
{
+ (void) dbu;
panic("evicting sa dbuf\n");
}
static void
sa_idx_tab_rele(objset_t *os, void *arg)
{
sa_os_t *sa = os->os_sa;
sa_idx_tab_t *idx_tab = arg;
if (idx_tab == NULL)
return;
mutex_enter(&sa->sa_lock);
if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
if (idx_tab->sa_variable_lengths)
kmem_free(idx_tab->sa_variable_lengths,
sizeof (uint16_t) *
idx_tab->sa_layout->lot_var_sizes);
zfs_refcount_destroy(&idx_tab->sa_refcount);
kmem_free(idx_tab->sa_idx_tab,
sizeof (uint32_t) * sa->sa_num_attrs);
kmem_free(idx_tab, sizeof (sa_idx_tab_t));
}
mutex_exit(&sa->sa_lock);
}
static void
sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
{
sa_os_t *sa __maybe_unused = os->os_sa;
ASSERT(MUTEX_HELD(&sa->sa_lock));
(void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
}
void
sa_spill_rele(sa_handle_t *hdl)
{
mutex_enter(&hdl->sa_lock);
if (hdl->sa_spill) {
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
dmu_buf_rele(hdl->sa_spill, NULL);
hdl->sa_spill = NULL;
hdl->sa_spill_tab = NULL;
}
mutex_exit(&hdl->sa_lock);
}
void
sa_handle_destroy(sa_handle_t *hdl)
{
dmu_buf_t *db = hdl->sa_bonus;
mutex_enter(&hdl->sa_lock);
(void) dmu_buf_remove_user(db, &hdl->sa_dbu);
if (hdl->sa_bonus_tab)
sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
if (hdl->sa_spill_tab)
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
dmu_buf_rele(hdl->sa_bonus, NULL);
if (hdl->sa_spill)
dmu_buf_rele(hdl->sa_spill, NULL);
mutex_exit(&hdl->sa_lock);
kmem_cache_free(sa_cache, hdl);
}
int
sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
sa_handle_type_t hdl_type, sa_handle_t **handlepp)
{
int error = 0;
sa_handle_t *handle = NULL;
#ifdef ZFS_DEBUG
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
doi.doi_bonus_type == DMU_OT_ZNODE);
#endif
/* find handle, if it exists */
/* if one doesn't exist then create a new one, and initialize it */
if (hdl_type == SA_HDL_SHARED)
handle = dmu_buf_get_user(db);
if (handle == NULL) {
sa_handle_t *winner = NULL;
handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
handle->sa_dbu.dbu_evict_func_sync = NULL;
handle->sa_dbu.dbu_evict_func_async = NULL;
handle->sa_userp = userp;
handle->sa_bonus = db;
handle->sa_os = os;
handle->sa_spill = NULL;
handle->sa_bonus_tab = NULL;
handle->sa_spill_tab = NULL;
error = sa_build_index(handle, SA_BONUS);
if (hdl_type == SA_HDL_SHARED) {
dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
NULL);
winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
}
if (winner != NULL) {
kmem_cache_free(sa_cache, handle);
handle = winner;
}
}
*handlepp = handle;
return (error);
}
int
sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
sa_handle_type_t hdl_type, sa_handle_t **handlepp)
{
dmu_buf_t *db;
int error;
if ((error = dmu_bonus_hold(objset, objid, NULL, &db)))
return (error);
return (sa_handle_get_from_db(objset, db, userp, hdl_type,
handlepp));
}
int
sa_buf_hold(objset_t *objset, uint64_t obj_num, void *tag, dmu_buf_t **db)
{
return (dmu_bonus_hold(objset, obj_num, tag, db));
}
void
sa_buf_rele(dmu_buf_t *db, void *tag)
{
dmu_buf_rele(db, tag);
}
static int
sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
{
ASSERT(hdl);
ASSERT(MUTEX_HELD(&hdl->sa_lock));
return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
}
static int
sa_lookup_locked(sa_handle_t *hdl, sa_attr_type_t attr, void *buf,
uint32_t buflen)
{
int error;
sa_bulk_attr_t bulk;
VERIFY3U(buflen, <=, SA_ATTR_MAX_LEN);
bulk.sa_attr = attr;
bulk.sa_data = buf;
bulk.sa_length = buflen;
bulk.sa_data_func = NULL;
ASSERT(hdl);
error = sa_lookup_impl(hdl, &bulk, 1);
return (error);
}
int
sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
{
int error;
mutex_enter(&hdl->sa_lock);
error = sa_lookup_locked(hdl, attr, buf, buflen);
mutex_exit(&hdl->sa_lock);
return (error);
}
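/*
 * Illustrative sketch of a typical fixed-length lookup through sa_lookup():
 * example_get_u64() and "attr" are hypothetical; the attribute id is
 * assumed to come from the table returned by sa_setup().
 */
static int
example_get_u64(sa_handle_t *hdl, sa_attr_type_t attr, uint64_t *valp)
{
        return (sa_lookup(hdl, attr, valp, sizeof (*valp)));
}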
#ifdef _KERNEL
int
sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, zfs_uio_t *uio)
{
int error;
sa_bulk_attr_t bulk;
bulk.sa_data = NULL;
bulk.sa_attr = attr;
bulk.sa_data_func = NULL;
ASSERT(hdl);
mutex_enter(&hdl->sa_lock);
if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
error = zfs_uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
zfs_uio_resid(uio)), UIO_READ, uio);
}
mutex_exit(&hdl->sa_lock);
return (error);
}
/*
* For an existing object upgraded from an old system, the on-disk layout
* has no slot for the project ID attribute. But the quota accounting logic
* needs to access related slots by offset directly, so we need to adjust
* these old objects' layouts to place the project ID at a unified, fixed offset.
*/
int
sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
{
znode_t *zp = sa_get_userdata(hdl);
dmu_buf_t *db = sa_get_db(hdl);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int count = 0, err = 0;
sa_bulk_attr_t *bulk, *attrs;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t uid, gid, mode, rdev, xattr = 0, parent, gen, links;
uint64_t crtime[2], mtime[2], ctime[2], atime[2];
zfs_acl_phys_t znode_acl = { 0 };
char scanstamp[AV_SCANSTAMP_SZ];
if (zp->z_acl_cached == NULL) {
zfs_acl_t *aclp;
mutex_enter(&zp->z_acl_lock);
err = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
mutex_exit(&zp->z_acl_lock);
if (err != 0 && err != ENOENT)
return (err);
}
bulk = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
mutex_enter(&hdl->sa_lock);
mutex_enter(&zp->z_lock);
err = sa_lookup_locked(hdl, SA_ZPL_PROJID(zfsvfs), &projid,
sizeof (uint64_t));
if (unlikely(err == 0))
/* Someone has added project ID attr by race. */
err = EEXIST;
if (err != ENOENT)
goto out;
/* First do a bulk query of the attributes that aren't cached */
if (zp->z_is_sa) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
&crtime, 16);
if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
} else {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
&crtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL,
&xattr, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&znode_acl, 88);
}
err = sa_bulk_lookup_locked(hdl, bulk, count);
if (err != 0)
goto out;
err = sa_lookup_locked(hdl, SA_ZPL_XATTR(zfsvfs), &xattr, 8);
if (err != 0 && err != ENOENT)
goto out;
zp->z_projid = projid;
zp->z_pflags |= ZFS_PROJID;
links = ZTONLNK(zp);
count = 0;
err = 0;
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GEN(zfsvfs), NULL, &gen, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
&crtime, 16);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_PROJID(zfsvfs), NULL, &projid, 8);
if (Z_ISBLK(ZTOTYPE(zp)) || Z_ISCHR(ZTOTYPE(zp)))
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
if (zp->z_acl_cached != NULL) {
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&zp->z_acl_cached->z_acl_count, 8);
if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
zfs_acl_xform(zp, zp->z_acl_cached, CRED());
locate.cb_aclp = zp->z_acl_cached;
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
zp->z_acl_cached->z_acl_bytes);
}
if (xattr)
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_XATTR(zfsvfs), NULL,
&xattr, 8);
if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
scanstamp, AV_SCANSTAMP_SZ);
SA_ADD_BULK_ATTR(attrs, count, SA_ZPL_SCANSTAMP(zfsvfs), NULL,
scanstamp, AV_SCANSTAMP_SZ);
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
}
VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
if (znode_acl.z_acl_extern_obj) {
VERIFY(0 == dmu_object_free(zfsvfs->z_os,
znode_acl.z_acl_extern_obj, tx));
}
zp->z_is_sa = B_TRUE;
out:
mutex_exit(&zp->z_lock);
mutex_exit(&hdl->sa_lock);
kmem_free(attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * ZPL_END);
return (err);
}
#endif
static sa_idx_tab_t *
sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
{
sa_idx_tab_t *idx_tab;
sa_os_t *sa = os->os_sa;
sa_lot_t *tb, search;
avl_index_t loc;
/*
* Determine the layout number. If SA node and header == 0 then
* force the index table to the dummy "1" empty layout.
*
* The layout number would only be zero for a newly created file
* that has not added any attributes yet, or with crypto enabled which
* doesn't write any attributes to the bonus buffer.
*/
search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);
tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
/* Verify header size is consistent with layout information */
ASSERT(tb);
ASSERT((IS_SA_BONUSTYPE(bonustype) &&
SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) ||
(IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));
/*
* See if any of the already existing TOC entries can be reused.
*/
for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
boolean_t valid_idx = B_TRUE;
int i;
if (tb->lot_var_sizes != 0 &&
idx_tab->sa_variable_lengths != NULL) {
for (i = 0; i != tb->lot_var_sizes; i++) {
if (hdr->sa_lengths[i] !=
idx_tab->sa_variable_lengths[i]) {
valid_idx = B_FALSE;
break;
}
}
}
if (valid_idx) {
sa_idx_tab_hold(os, idx_tab);
return (idx_tab);
}
}
/* No such luck, create a new entry */
idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
idx_tab->sa_idx_tab =
kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
idx_tab->sa_layout = tb;
zfs_refcount_create(&idx_tab->sa_refcount);
if (tb->lot_var_sizes)
idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
tb->lot_var_sizes, KM_SLEEP);
sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
tb, idx_tab);
sa_idx_tab_hold(os, idx_tab); /* one hold for consumer */
sa_idx_tab_hold(os, idx_tab); /* one for layout */
list_insert_tail(&tb->lot_idx_tab, idx_tab);
return (idx_tab);
}
void
sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
boolean_t start, void *userdata)
{
ASSERT(start);
*dataptr = userdata;
*len = total_len;
}
static void
sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
{
uint64_t attr_value = 0;
sa_os_t *sa = hdl->sa_os->os_sa;
sa_attr_table_t *tb = sa->sa_attr_table;
int i;
mutex_enter(&sa->sa_lock);
if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
mutex_exit(&sa->sa_lock);
return;
}
if (sa->sa_reg_attr_obj == 0) {
sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
DMU_OT_SA_ATTR_REGISTRATION,
sa->sa_master_obj, SA_REGISTRY, tx);
}
for (i = 0; i != sa->sa_num_attrs; i++) {
if (sa->sa_attr_table[i].sa_registered)
continue;
ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
tb[i].sa_byteswap);
VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
tb[i].sa_name, 8, 1, &attr_value, tx));
tb[i].sa_registered = B_TRUE;
}
sa->sa_need_attr_registration = B_FALSE;
mutex_exit(&sa->sa_lock);
}
/*
* Replace all attributes with attributes specified in template.
* If the dnode had a spill buffer then those attributes will
* also be replaced, possibly with just an empty spill block
*
* This interface is intended to only be used for bulk adding of
* attributes for a new file. It will also be used by the ZPL
* when converting an old-format znode to native SA support.
*/
int
sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
int attr_count, dmu_tx_t *tx)
{
sa_os_t *sa = hdl->sa_os->os_sa;
if (sa->sa_need_attr_registration)
sa_attr_register_sync(hdl, tx);
return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
}
int
sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
int attr_count, dmu_tx_t *tx)
{
int error;
mutex_enter(&hdl->sa_lock);
error = sa_replace_all_by_template_locked(hdl, attr_desc,
attr_count, tx);
mutex_exit(&hdl->sa_lock);
return (error);
}
/*
* Add/remove a single attribute or replace a variable-sized attribute value
* with a value of a different size, and then rewrite the entire set
* of attributes.
* Same-length attribute value replacement (including fixed-length attributes)
* is handled more efficiently by the upper layers.
*/
static int
sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
uint16_t buflen, dmu_tx_t *tx)
{
sa_os_t *sa = hdl->sa_os->os_sa;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
dnode_t *dn;
sa_bulk_attr_t *attr_desc;
void *old_data[2];
int bonus_attr_count = 0;
int bonus_data_size = 0;
int spill_data_size = 0;
int spill_attr_count = 0;
int error;
uint16_t length, reg_length;
int i, j, k, length_idx;
sa_hdr_phys_t *hdr;
sa_idx_tab_t *idx_tab;
int attr_count;
int count;
ASSERT(MUTEX_HELD(&hdl->sa_lock));
/* First make a copy of the old data */
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_bonuslen != 0) {
bonus_data_size = hdl->sa_bonus->db_size;
old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
bcopy(hdl->sa_bonus->db_data, old_data[0],
hdl->sa_bonus->db_size);
bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
} else {
old_data[0] = NULL;
}
DB_DNODE_EXIT(db);
/* Bring spill buffer online if it isn't currently */
if ((error = sa_get_spill(hdl)) == 0) {
spill_data_size = hdl->sa_spill->db_size;
old_data[1] = vmem_alloc(spill_data_size, KM_SLEEP);
bcopy(hdl->sa_spill->db_data, old_data[1],
hdl->sa_spill->db_size);
spill_attr_count =
hdl->sa_spill_tab->sa_layout->lot_attr_count;
} else if (error && error != ENOENT) {
if (old_data[0])
kmem_free(old_data[0], bonus_data_size);
return (error);
} else {
old_data[1] = NULL;
}
/* build descriptor of all attributes */
attr_count = bonus_attr_count + spill_attr_count;
if (action == SA_ADD)
attr_count++;
else if (action == SA_REMOVE)
attr_count--;
attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);
/*
 * Loop through the bonus buffer and the spill buffer (if it exists),
 * and build up a new attr_descriptor with which to reset the attributes.
*/
k = j = 0;
count = bonus_attr_count;
hdr = SA_GET_HDR(hdl, SA_BONUS);
idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
for (; k != 2; k++) {
/*
* Iterate over each attribute in layout. Fetch the
* size of variable-length attributes needing rewrite
* from sa_lengths[].
*/
for (i = 0, length_idx = 0; i != count; i++) {
sa_attr_type_t attr;
attr = idx_tab->sa_layout->lot_attrs[i];
reg_length = SA_REGISTERED_LEN(sa, attr);
if (reg_length == 0) {
length = hdr->sa_lengths[length_idx];
length_idx++;
} else {
length = reg_length;
}
if (attr == newattr) {
/*
* There is nothing to do for SA_REMOVE,
* so it is just skipped.
*/
if (action == SA_REMOVE)
continue;
/*
* Duplicate attributes are not allowed, so the
 * action cannot be SA_ADD here.
*/
ASSERT3S(action, ==, SA_REPLACE);
/*
* Only a variable-sized attribute can be
* replaced here, and its size must be changing.
*/
ASSERT3U(reg_length, ==, 0);
ASSERT3U(length, !=, buflen);
SA_ADD_BULK_ATTR(attr_desc, j, attr,
locator, datastart, buflen);
} else {
SA_ADD_BULK_ATTR(attr_desc, j, attr,
NULL, (void *)
(TOC_OFF(idx_tab->sa_idx_tab[attr]) +
(uintptr_t)old_data[k]), length);
}
}
if (k == 0 && hdl->sa_spill) {
hdr = SA_GET_HDR(hdl, SA_SPILL);
idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
count = spill_attr_count;
} else {
break;
}
}
if (action == SA_ADD) {
reg_length = SA_REGISTERED_LEN(sa, newattr);
IMPLY(reg_length != 0, reg_length == buflen);
SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
datastart, buflen);
}
ASSERT3U(j, ==, attr_count);
error = sa_build_layouts(hdl, attr_desc, attr_count, tx);
if (old_data[0])
kmem_free(old_data[0], bonus_data_size);
if (old_data[1])
vmem_free(old_data[1], spill_data_size);
kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);
return (error);
}
static int
sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
dmu_tx_t *tx)
{
int error;
sa_os_t *sa = hdl->sa_os->os_sa;
dmu_object_type_t bonustype;
dmu_buf_t *saved_spill;
ASSERT(hdl);
ASSERT(MUTEX_HELD(&hdl->sa_lock));
bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));
saved_spill = hdl->sa_spill;
/* sync out registration table if necessary */
if (sa->sa_need_attr_registration)
sa_attr_register_sync(hdl, tx);
error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
sa->sa_update_cb(hdl, tx);
/*
 * If saved_spill is NULL and the current sa_spill is not NULL, that
* means we increased the refcount of the spill buffer through
* sa_get_spill() or dmu_spill_hold_by_dnode(). Therefore we
* must release the hold before calling dmu_tx_commit() to avoid
* making a copy of this buffer in dbuf_sync_leaf() due to the
* reference count now being greater than 1.
*/
if (!saved_spill && hdl->sa_spill) {
if (hdl->sa_spill_tab) {
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
hdl->sa_spill_tab = NULL;
}
dmu_buf_rele(hdl->sa_spill, NULL);
hdl->sa_spill = NULL;
}
return (error);
}
/*
* update or add new attribute
*/
int
sa_update(sa_handle_t *hdl, sa_attr_type_t type,
void *buf, uint32_t buflen, dmu_tx_t *tx)
{
int error;
sa_bulk_attr_t bulk;
VERIFY3U(buflen, <=, SA_ATTR_MAX_LEN);
bulk.sa_attr = type;
bulk.sa_data_func = NULL;
bulk.sa_length = buflen;
bulk.sa_data = buf;
mutex_enter(&hdl->sa_lock);
error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
mutex_exit(&hdl->sa_lock);
return (error);
}
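/*
 * Illustrative sketch (not from the original source): a single fixed-length
 * attribute update is just a one-element bulk update, so a caller holding an
 * SA handle might do something like:
 *
 *	uint64_t newsize = 4096;
 *	error = sa_update(hdl, SA_ZPL_SIZE(zfsvfs), &newsize,
 *	    sizeof (newsize), tx);
 *
 * SA_ZPL_SIZE() and the surrounding zfsvfs/hdl/tx variables are assumed
 * for illustration only.
 */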
/*
* Return size of an attribute
*/
int
sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
{
sa_bulk_attr_t bulk;
int error;
bulk.sa_data = NULL;
bulk.sa_attr = attr;
bulk.sa_data_func = NULL;
ASSERT(hdl);
mutex_enter(&hdl->sa_lock);
if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
mutex_exit(&hdl->sa_lock);
return (error);
}
*size = bulk.sa_size;
mutex_exit(&hdl->sa_lock);
return (0);
}
int
sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
{
ASSERT(hdl);
ASSERT(MUTEX_HELD(&hdl->sa_lock));
return (sa_lookup_impl(hdl, attrs, count));
}
int
sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
{
int error;
ASSERT(hdl);
mutex_enter(&hdl->sa_lock);
error = sa_bulk_lookup_locked(hdl, attrs, count);
mutex_exit(&hdl->sa_lock);
return (error);
}
int
sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
{
int error;
ASSERT(hdl);
mutex_enter(&hdl->sa_lock);
error = sa_bulk_update_impl(hdl, attrs, count, tx);
mutex_exit(&hdl->sa_lock);
return (error);
}
int
sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
{
int error;
mutex_enter(&hdl->sa_lock);
error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
NULL, 0, tx);
mutex_exit(&hdl->sa_lock);
return (error);
}
void
sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
{
dmu_object_info_from_db(hdl->sa_bonus, doi);
}
void
sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
{
dmu_object_size_from_db(hdl->sa_bonus,
blksize, nblocks);
}
void
sa_set_userp(sa_handle_t *hdl, void *ptr)
{
hdl->sa_userp = ptr;
}
dmu_buf_t *
sa_get_db(sa_handle_t *hdl)
{
return (hdl->sa_bonus);
}
void *
sa_get_userdata(sa_handle_t *hdl)
{
return (hdl->sa_userp);
}
void
sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
{
ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
os->os_sa->sa_update_cb = func;
}
void
sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
{
mutex_enter(&os->os_sa->sa_lock);
sa_register_update_callback_locked(os, func);
mutex_exit(&os->os_sa->sa_lock);
}
uint64_t
sa_handle_object(sa_handle_t *hdl)
{
return (hdl->sa_bonus->db_object);
}
boolean_t
sa_enabled(objset_t *os)
{
return (os->os_sa == NULL);
}
int
sa_set_sa_object(objset_t *os, uint64_t sa_object)
{
sa_os_t *sa = os->os_sa;
if (sa->sa_master_obj)
return (1);
sa->sa_master_obj = sa_object;
return (0);
}
int
sa_hdrsize(void *arg)
{
sa_hdr_phys_t *hdr = arg;
return (SA_HDR_SIZE(hdr));
}
void
sa_handle_lock(sa_handle_t *hdl)
{
ASSERT(hdl);
mutex_enter(&hdl->sa_lock);
}
void
sa_handle_unlock(sa_handle_t *hdl)
{
ASSERT(hdl);
mutex_exit(&hdl->sa_lock);
}
#ifdef _KERNEL
EXPORT_SYMBOL(sa_handle_get);
EXPORT_SYMBOL(sa_handle_get_from_db);
EXPORT_SYMBOL(sa_handle_destroy);
EXPORT_SYMBOL(sa_buf_hold);
EXPORT_SYMBOL(sa_buf_rele);
EXPORT_SYMBOL(sa_spill_rele);
EXPORT_SYMBOL(sa_lookup);
EXPORT_SYMBOL(sa_update);
EXPORT_SYMBOL(sa_remove);
EXPORT_SYMBOL(sa_bulk_lookup);
EXPORT_SYMBOL(sa_bulk_lookup_locked);
EXPORT_SYMBOL(sa_bulk_update);
EXPORT_SYMBOL(sa_size);
EXPORT_SYMBOL(sa_object_info);
EXPORT_SYMBOL(sa_object_size);
EXPORT_SYMBOL(sa_get_userdata);
EXPORT_SYMBOL(sa_set_userp);
EXPORT_SYMBOL(sa_get_db);
EXPORT_SYMBOL(sa_handle_object);
EXPORT_SYMBOL(sa_register_update_callback);
EXPORT_SYMBOL(sa_setup);
EXPORT_SYMBOL(sa_replace_all_by_template);
EXPORT_SYMBOL(sa_replace_all_by_template_locked);
EXPORT_SYMBOL(sa_enabled);
EXPORT_SYMBOL(sa_cache_init);
EXPORT_SYMBOL(sa_cache_fini);
EXPORT_SYMBOL(sa_set_sa_object);
EXPORT_SYMBOL(sa_hdrsize);
EXPORT_SYMBOL(sa_handle_lock);
EXPORT_SYMBOL(sa_handle_unlock);
EXPORT_SYMBOL(sa_lookup_uio);
EXPORT_SYMBOL(sa_add_projid);
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/zfs/sha256.c b/sys/contrib/openzfs/module/zfs/sha256.c
index d297768eada5..c5b033cf019d 100644
--- a/sys/contrib/openzfs/module/zfs/sha256.c
+++ b/sys/contrib/openzfs/module/zfs/sha256.c
@@ -1,105 +1,104 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/sha2.h>
#include <sys/abd.h>
#include <sys/qat.h>
static int
sha_incremental(void *buf, size_t size, void *arg)
{
SHA2_CTX *ctx = arg;
SHA2Update(ctx, buf, size);
return (0);
}
-/*ARGSUSED*/
void
abd_checksum_SHA256(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
int ret;
SHA2_CTX ctx;
zio_cksum_t tmp;
if (qat_checksum_use_accel(size)) {
uint8_t *buf = abd_borrow_buf_copy(abd, size);
ret = qat_checksum(ZIO_CHECKSUM_SHA256, buf, size, &tmp);
abd_return_buf(abd, buf, size);
if (ret == CPA_STATUS_SUCCESS)
goto bswap;
/* If the hardware implementation fails fall back to software */
}
SHA2Init(SHA256, &ctx);
(void) abd_iterate_func(abd, 0, size, sha_incremental, &ctx);
SHA2Final(&tmp, &ctx);
bswap:
/*
 * A prior implementation of this function used a private
 * SHA256 implementation that always wrote things out in
 * big-endian byte order, and there was no byteswap variant
 * of it. To preserve on-disk compatibility we need to force
 * that behavior.
*/
zcp->zc_word[0] = BE_64(tmp.zc_word[0]);
zcp->zc_word[1] = BE_64(tmp.zc_word[1]);
zcp->zc_word[2] = BE_64(tmp.zc_word[2]);
zcp->zc_word[3] = BE_64(tmp.zc_word[3]);
}
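/*
 * Endianness note (illustrative, not part of the original source): BE_64()
 * is a byte swap on little-endian hosts and a no-op on big-endian hosts, so
 * the stored checksum words are big-endian either way. For example, a native
 * word of 0x0123456789abcdef always lands on disk as the byte sequence
 * 01 23 45 67 89 ab cd ef.
 */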
-/*ARGSUSED*/
void
abd_checksum_SHA512_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
SHA2_CTX ctx;
SHA2Init(SHA512_256, &ctx);
(void) abd_iterate_func(abd, 0, size, sha_incremental, &ctx);
SHA2Final(zcp, &ctx);
}
-/*ARGSUSED*/
void
abd_checksum_SHA512_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
zio_cksum_t tmp;
abd_checksum_SHA512_native(abd, size, ctx_template, &tmp);
zcp->zc_word[0] = BSWAP_64(tmp.zc_word[0]);
zcp->zc_word[1] = BSWAP_64(tmp.zc_word[1]);
zcp->zc_word[2] = BSWAP_64(tmp.zc_word[2]);
zcp->zc_word[3] = BSWAP_64(tmp.zc_word[3]);
}
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index a02fd198bed0..7c49ced9998a 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,9951 +1,9982 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
 * but with the number of taskqs also scaling with the number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
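/*
 * Illustrative reading of the table above (not part of the original source):
 * the WRITE row expands to a throughput-oriented ZTI_BATCH issue taskq, a
 * fixed five-thread issue_high taskq (ZTI_N(5)), a CPU-scaled interrupt
 * taskq (ZTI_SCALE), and another fixed five-thread intr_high taskq.
 */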
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
uint_t zio_taskq_batch_tpq; /* threads per taskq */
boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
uint_t zio_taskq_basedc = 80; /* base duty cycle */
boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
unsigned long zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
* might be incomplete or out-dated.
*
* We are more tolerant for pools opened from a cachefile since we could have
* an out-dated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
boolean_t zfs_pause_spa_sync = B_FALSE;
/*
 * Variables to indicate that the livelist condense zthr function should wait
 * at certain points for the livelist to be removed; used to test
 * condense/destroy races.
*/
int zfs_livelist_condense_zthr_pause = 0;
int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
int zfs_livelist_condense_sync_cancel = 0;
int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
if (strval != NULL)
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
else
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
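/*
 * Illustrative sketch (not part of the original source): for an integer
 * property the resulting entry in 'nvl' has the shape
 *
 *	"<propname>" -> { ZPROP_SOURCE = src, ZPROP_VALUE = intval }
 *
 * so a consumer would look it up along the lines of:
 *
 *	nvlist_t *propval;
 *	uint64_t size;
 *	VERIFY0(nvlist_lookup_nvlist(nvl,
 *	    zpool_prop_to_name(ZPOOL_PROP_SIZE), &propval));
 *	VERIFY0(nvlist_lookup_uint64(propval, ZPROP_VALUE, &size));
 */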
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
 * when opening pools created before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
spa_prop_add_list(*nvp, prop, strval, 0, src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = SET_ERROR(EINVAL);
break;
}
/*
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
-/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but we want
 * fewer for better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Limit each taskq within 100% to not trigger assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
value = (zio_taskq_batch_pct + count / 2) / count;
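		/*
		 * Worked example (illustrative only): with 16 CPUs and the
		 * default zio_taskq_batch_pct of 80, cpus = 16 * 80 / 100 =
		 * 12, count = 1 + 12 / 6 = 3 (3 * 3 = 9 <= 12, so no
		 * reduction), and value = (80 + 3 / 2) / 3 = 27, i.e. three
		 * taskqs at 27% of the CPUs (4 threads) each, 12 threads in
		 * total, matching the 16-CPU row of the table above.
		 */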
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose a taskq at random by using
* the low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_embedded_log_class =
metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that will set the flag that will instruct
* spa_sync to attempt to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_head(&spa->spa_log_summary)) {
VERIFY0(e->lse_mscount);
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
- * If the log space map feature is enabled and the pool is getting
- * exported (but not destroyed), we want to spend some time flushing
- * as many metaslabs as we can in an attempt to destroy log space
- * maps and save import time.
+ * If we have set the spa_final_txg, we have already performed the
+ * tasks below in spa_export_common(). We should not redo them here,
+ * since we delay the final TXGs beyond what spa_final_txg is set to.
*/
- if (spa_should_flush_logs_on_unload(spa))
- spa_unload_log_sm_flush_all(spa);
+ if (spa->spa_final_txg == UINT64_MAX) {
+ /*
+ * If the log space map feature is enabled and the pool is
+ * getting exported (but not destroyed), we want to spend some
+ * time flushing as many metaslabs as we can in an attempt to
+ * destroy log space maps and save import time.
+ */
+ if (spa_should_flush_logs_on_unload(spa))
+ spa_unload_log_sm_flush_all(spa);
- /*
- * Stop async tasks.
- */
- spa_async_suspend(spa);
+ /*
+ * Stop async tasks.
+ */
+ spa_async_suspend(spa);
- if (spa->spa_root_vdev) {
- vdev_t *root_vdev = spa->spa_root_vdev;
- vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
- vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
- vdev_autotrim_stop_all(spa);
- vdev_rebuild_stop_all(spa);
+ if (spa->spa_root_vdev) {
+ vdev_t *root_vdev = spa->spa_root_vdev;
+ vdev_initialize_stop_all(root_vdev,
+ VDEV_INITIALIZE_ACTIVE);
+ vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
+ vdev_autotrim_stop_all(spa);
+ vdev_rebuild_stop_all(spa);
+ }
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
if (spa->spa_root_vdev != NULL) {
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
if (vc->vdev_mg != NULL)
taskq_wait(vc->vdev_mg->mg_taskq);
}
}
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
if (spa->spa_spares.sav_vdevs) {
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
if (spa->spa_l2cache.sav_vdevs) {
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
if (spa->spa_spares.sav_vdevs)
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
DATA_TYPE_NVLIST_ARRAY) == 0);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
&guid) == 0);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device, or if the header
* of the device is invalid, we issue an async
* TRIM command for the whole device, which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
DATA_TYPE_NVLIST_ARRAY) == 0);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
/*
* Purge vdevs that were dropped
*/
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
if (oldvdevs)
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
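/*
* Read a packed nvlist stored in MOS object 'obj': the object's bonus buffer
* holds the packed size, the packed bytes are then read and unpacked into a
* newly allocated nvlist returned via 'value'.
*/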
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device as missing only if it failed
* to open (i.e. a device that is offline or faulted is
* not considered missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
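/*
* Run spa_check_removed() on every auxiliary (spare or l2cache) vdev.
*/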
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
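/*
* Completion notification for ZIL claim I/Os: record the largest block birth
* txg seen among successfully claimed blocks in spa_claim_max_txg.
*/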
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
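/*
* Completion callback for the verification reads issued by
* spa_load_verify_cb(): classify any error as a metadata or data error,
* release the in-flight byte accounting and wake up throttled issuers.
*/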
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* Maximum number of inflight bytes is a power-of-two fraction of the ARC
* size (arc size >> spa_load_verify_shift). By default, we set it to 1/16th
* of the ARC.
*/
int spa_load_verify_shift = 4;
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;
-/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
+ (void) zilog, (void) dnp;
+
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
zio_t *rio = arg;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
-/* ARGSUSED */
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
+ (void) dp, (void) arg;
+
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
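/*
* Verify the pool during load/import: check that no dataset name exceeds
* ZFS_MAX_DATASET_NAME_LEN, then (when enabled) traverse the pool reading
* metadata and optionally data, counting errors. The error counts are then
* compared against the load policy to decide whether the selected txg is
* acceptable.
*/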
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
VERIFY(nvlist_add_int64(spa->spa_load_info,
ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
-/* ARGSUSED */
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
+ (void) z;
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
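/*
* Free a single block pointer from a livelist being deleted and adjust the
* space accounting of the pool's $FREE dsl_dir accordingly.
*/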
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
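/*
* Retrieve the first entry of the given ZAP of deleted clones and return its
* livelist object number via 'llp'.
*/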
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t za;
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, &za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za.za_first_integer;
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
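/*
* Syncing-context step run once every sublist of a livelist has been freed:
* remove the livelist from the deleted-clones ZAP, free its deadlist and
* decrement the livelist feature count. If no livelists remain to delete,
* destroy the ZAP itself and notify any waiters.
*/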
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load the object number of the livelist to be removed and open it. Then
* load its first sublist and determine which block pointers should actually
* be freed. Finally, call a synctask which performs the actual frees and updates
* the pool-wide livelist data.
*/
-/* ARGSUSED */
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
dsl_deadlist_open(ll, mos, ll_obj);
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
minclsyspri);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
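/*
* Callback used while condensing to capture block pointers that were
* appended to the livelist after the open-context scan started, sorting
* them into separate ALLOC and FREE bplists so they are not lost by the
* condense.
*/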
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
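/*
* Syncing-context half of livelist condensing. Unless the condense was
* cancelled, pick up any block pointers appended to the two entries since
* the open-context scan, clear the first entry, remove the second and
* re-insert the retained ALLOCs and newly observed FREEs into the livelist.
*/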
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
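/*
* Open-context half of livelist condensing, run by the condense zthr:
* process the two sublists being condensed, then dispatch
* spa_livelist_condense_sync() as a synctask. If the scan is interrupted or
* no tx can be assigned, clean up and leave the condense to be retried in a
* later txg.
*/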
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both stores the number of
* blockpointers and iterates over them while the bpobj's lock is held, so
* the sizes returned to us are consistent with what was actually
* processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing cannot continue: either it was externally stopped or
* we were unable to assign to a tx because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
-/* ARGSUSED */
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
+ (void) z;
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa, minclsyspri);
}
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa, minclsyspri);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check; this flag is used by zdb, which
* is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host which last imported it. Since the hostid from the
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Nanoseconds the activity check must watch for changes on-disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_delay += import_delay * random_in_range(250) / 1000;
import_expire = gethrtime() + import_delay;
while (gethrtime() < import_expire) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from configuration read from disk store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
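/*
* Verify that the pool was last accessed by this host: compare the hostid
* stored in the MOS config against the local hostid and fail with EBADF if
* they differ. The check is skipped for the root pool.
*/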
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
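/*
* Parse the configuration in spa->spa_config: pick up the on-disk version,
* pool guid (rejecting configs for pools that are already imported),
* comment, compatibility and split information, create the "godfather"
* root zios for async I/O, and build the (still untrusted) vdev tree.
*/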
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
char *comment;
char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case that we allow an already imported pool to be
* imported again, is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to still be considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
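/*
* Select the uberblock to load: find the best uberblock among the vdev
* labels (unless a read-only checkpoint rewind already chose one), run the
* multihost activity check when required, and verify that the pool version
* and the label's features_for_read are supported.
*/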
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* For pools which have the multihost property on determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error = spa_activity_check(spa, ub, spa->spa_config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
0);
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
VERIFY(nvlist_add_string(unsup_feat,
nvpair_name(nvp), "") == 0);
}
}
if (!nvlist_empty(unsup_feat)) {
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
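/*
* Open the root block pointer of the selected uberblock by initializing the
* DSL pool, and remember the resulting meta objset.
*/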
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
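/*
* Replace the untrusted config with the authoritative one stored in the
* MOS: load the MOS config object, verify the host on open, build a new
* vdev tree from it, copy the (possibly fresher) device paths over from the
* untrusted tree, then reopen and revalidate the vdevs. Finally, check that
* no logs are missing, that we are not missing too many healthy top-level
* vdevs compared to the MOS, and that the vdev guid sum matches the
* uberblock.
*/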
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, pool owner wasn't verified yet, thus do
* the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible method that has a
* best effort policy.
*/
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we got the config from the MOS, we should be more strict
* in checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
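/*
* Illustrative note (added for clarity; the sketch below is not part of
* the function above): the guid-sum comparison works because every
* uberblock records, at the time it is written, the sum of the guids of
* all vdevs in the pool (ub_guid_sum), while the in-core root vdev
* carries the same sum for the tree just rebuilt from the MOS config
* (vdev_guid_sum, accumulated as children are added to the tree).
* Conceptually:
*
*     expected = spa->spa_uberblock.ub_guid_sum;
*     actual   = rvd->vdev_guid_sum;
*     if (actual != expected)
*         return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
*
* A mismatch means some device described by the config is missing or
* stale relative to the uberblock being loaded.
*/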
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concrete vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
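/*
* Illustrative note (added for clarity; not part of the upstream
* function above): the enabled/unsupported feature lists that
* spa_ld_check_features() stores in spa->spa_load_info are what
* userland (libzfs) reads back after a failed import to tell the user
* which features are missing. A consumer of the load info could do
* roughly the following (report() is a hypothetical callback):
*
*     nvlist_t *unsup;
*     if (nvlist_lookup_nvlist(spa->spa_load_info,
*         ZPOOL_CONFIG_UNSUP_FEAT, &unsup) == 0) {
*         for (nvpair_t *nvp = nvlist_next_nvpair(unsup, NULL);
*             nvp != NULL; nvp = nvlist_next_nvpair(unsup, nvp))
*             report(nvpair_name(nvp));
*     }
*
* The actual reporting code lives in userland and is only assumed here.
*/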
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa.
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early (i.e. outside of sync context). See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
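/*
* Illustrative note (added for clarity; not part of the upstream
* function above): spa_ld_get_props() leans on the spa_dir_prop()
* idiom used throughout this file for optional MOS directory entries:
* ENOENT simply means the pool predates the entry, while any other
* error is treated as corruption. In isolation the pattern looks like:
*
*     uint64_t obj;
*     int err = spa_dir_prop(spa, DMU_POOL_HISTORY, &obj, B_FALSE);
*     if (err != 0 && err != ENOENT)
*         return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
*
* DMU_POOL_HISTORY is just one of the optional entries read above; the
* final B_FALSE marks the entry as optional so its absence is not
* logged as a load failure.
*/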
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it to the original value before
* returning as we might be calling spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases when we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening a
* pool as read-only. Specifically, verifying the checkpointed
* state with zdb, and importing the checkpointed state to get
* a "preview" of its content.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
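/*
* Illustrative note (added for clarity; not part of the upstream
* function above): the EAGAIN handling implements a two-pass load:
*
*     pass 1: build the vdev tree from the caller's (untrusted) config
*             and read the trusted config from the MOS;
*     pass 2: only if the two trees differ too much (EAGAIN), tear the
*             spa down with spa_ld_prepare_for_reload(), rebuild it,
*             and repeat with reloading=B_TRUE so a second mismatch is
*             treated as fatal by spa_ld_trusted_config().
*
* update_config_cache is set so the corrected config is later pushed
* back to the cachefile once the pool finishes loading read-write.
*/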
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint, update the config cache
* anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
return (error);
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
return (error);
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
error = spa_ld_load_special_directories(spa);
if (error != 0)
return (error);
/*
* Retrieve pool properties from the MOS.
*/
error = spa_ld_get_props(spa);
if (error != 0)
return (error);
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
return (error);
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
return (error);
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
return (error);
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Traverse the last txgs to make sure the pool was left in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
* auxiliary threads above (which include the livelist
* deletion zthr).
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
return (0);
}
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails, this function will try loading prior txgs. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back, save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks.
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
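/*
* Illustrative note (added for clarity; not part of the upstream
* function above): the rewind loop walks backwards one uberblock at a
* time. Each spa_load_retry() call caps the next attempt at
* spa_uberblock.ub_txg - 1, so successive iterations select
* progressively older uberblocks until a load succeeds or the txg
* drops below min_txg. Schematically:
*
*     attempt 1: max_txg = last_ubsync_txg            -> fails
*     attempt 2: max_txg = (selected txg of #1) - 1   -> fails
*     ...
*     attempt N: ub_txg < min_txg                     -> give up
*
* min_txg is safe_rewind_txg unless ZPOOL_EXTREME_REWIND was requested,
* in which case rewinding may go all the way back to TXG_INITIAL.
*/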
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics at
* the same time we open the pool, without having to keep around the spa_t in some
* ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
* If vdev_validate() returns failure (indicated by
* EBADF), it means that one of the vdevs indicates
* that the pool has been exported or destroyed. If
* this is the case, the config cache is out of sync and
* we should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
VERIFY(nvlist_dup(spa->spa_config, config,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER) {
VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
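/*
* Illustrative usage sketch (added for clarity; not part of the
* upstream source): a typical in-kernel consumer opens a pool by name,
* holds the reference for the duration of the operation, and releases
* it with spa_close() using the same tag. The pool name below is
* hypothetical:
*
*     spa_t *spa;
*     int err = spa_open("tank", &spa, FTAG);
*     if (err == 0) {
*         ... operate on the open pool ...
*         spa_close(spa, FTAG);
*     }
*
* spa_open_rewind() is used instead when the caller wants to pass a
* rewind/load policy nvlist and receive the resulting config.
*/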
/*
* Lookup the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spares device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
if (nspares != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
/*
* Go through and find any spares which have since been
* repurposed as active spares. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
VERIFY(nvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
VERIFY(nvlist_lookup_uint64_array(
spares[i], ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
if (nl2cache != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
VERIFY(nvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa)) == 0);
if (spa_suspended(spa)) {
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended) == 0);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
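/*
* Illustrative usage sketch (added for clarity; not part of the
* upstream source): spa_get_stats() backs the pool-stats ioctl used by
* the zpool(8) utilities. Callers must be prepared for a non-NULL
* config even when an error is returned (see the faulted-pool path
* above). Roughly:
*
*     nvlist_t *config;
*     char altroot[MAXPATHLEN];
*     int err = spa_get_stats("tank", &config, altroot,
*         sizeof (altroot));
*     if (config != NULL)
*         nvlist_free(config);
*
* "tank" and the altroot buffer size are assumptions for illustration.
*/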
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs) == 0);
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
KM_SLEEP) == 0);
for (i = 0; i < ndevs; i++)
VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
KM_SLEEP) == 0);
VERIFY(nvlist_remove(sav->sav_config, config,
DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
config, newdevs, ndevs + oldndevs) == 0);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
devs, ndevs) == 0);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
char *feat_name;
char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
* Instantiate the metaslab groups (this will dirty the vdevs);
* we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to caller (i.e. rewind info, missing devices, etc).
*/
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
VERIFY(nvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
return (0);
}
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
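/* A config without a pool name and state is unusable; give up early. */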
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
poolname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
state) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp) == 0);
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata) == 0);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
VERIFY(nvlist_add_string(config,
ZPOOL_CONFIG_BOOTFS, dsname) == 0);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error;
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
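/*
* Tear down the pool's ZVOL device nodes and wait for any outstanding
* ZVOL work to drain before continuing with the export.
*/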
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
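/*
* A pool that was never fully loaded has no in-core state to quiesce;
* go straight to the export path.
*/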
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
if (spa->spa_sync_on) {
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools from stealing the active spare
* from an exported pool. At the user's explicit request, such a
* pool can be forcibly exported.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
if (spa->spa_root_vdev != NULL) {
vdev_t *rvd = spa->spa_root_vdev;
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
+ vdev_config_dirty(spa->spa_root_vdev);
+ spa_config_exit(spa, SCL_ALL, FTAG);
+ }
+
+ /*
+ * If the log space map feature is enabled and the pool is
+ * getting exported (but not destroyed), we want to spend some
+ * time flushing as many metaslabs as we can in an attempt to
+ * destroy log space maps and save import time. This has to be
+ * done before we set the spa_final_txg, otherwise
+ * spa_sync() -> spa_flush_metaslabs() may dirty the final TXGs.
+ * spa_should_flush_logs_on_unload() should be called after
+ * spa_state has been set to the new_state.
+ */
+ if (spa_should_flush_logs_on_unload(spa))
+ spa_unload_log_sm_flush_all(spa);
+
+ if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
- vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
}
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the draid feature flag
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
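/* Reject a request that adds no top-level vdevs, spares, or cache devices. */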
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* The virtual dRAID spares must be added after the vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
* vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
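/*
* Transfer the new top-level vdevs from the temporary parse tree onto
* the root vdev and mark their configs dirty.
*/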
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Attach a device to a mirror. The arguments are the path to any device
* in the mirror, and the nvroot for the new device. If the path specifies
* a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
* If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
* should be performed instead of traditional healing reconstruction. From
* an administrator's perspective these are both resilver operations.
*/
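/*
* In zpool(8) terms, 'zpool attach' roughly corresponds to calling this
* with replacing == 0 (the old and new devices end up in a mirror vdev),
* while 'zpool replace' corresponds to replacing != 0 (a replacing vdev
* that detaches the old disk once resilvering completes).
*/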
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
if (dsl_scan_resilvering(spa_get_dsl(spa)))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH)) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* Spares can't replace logs
*/
if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
* vdev types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
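/*
* Save copies of the device paths and spare state now; they are used
* for the history record logged after the config is committed.
*/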
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from the userland through the
* ioctl interface, spa_vdev_detach() can be potentially called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even if we checkpoint the pool exactly as
* spa_vdev_resilver_done() calls this function, everything
* should be fine because the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check to see
* if the vdev_initialize_thread is NULL. We do this instead
* of using the vdev_initialize_state since there might be
* a previous initialization process which has completed but
* whose thread has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check to see if the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but whose thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift) == 0);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
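/* Reopen the vdev tree so the offline flags just set take effect. */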
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
glist, children) == 0);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
nvl) == 0);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
spa->spa_config_txg) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL)) == 0);
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
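/*
* Open a transaction against the MOS so the per-vdev "detach" history
* entries below can be logged.
*/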
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
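/*
* spa_vdev_detach() takes the namespace and config locks itself, so
* drop the config lock across the call and retake it afterwards.
*/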
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If a detach was not performed above, replace waiters will not have
* been notified, in which case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
return (dsl_scan(spa->spa_dsl_pool, func));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
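/* Grab and clear the pending task mask under the async lock. */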
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
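/*
 * Usage sketch (hypothetical caller, shown for illustration only): the async
 * machinery above is driven entirely by the spa_async_tasks bitmask. A caller
 * that wants the cached config rewritten and every vdev re-probed can OR the
 * SPA_ASYNC_* bits together and let the dispatcher spawn the worker, roughly:
 *
 * spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE | SPA_ASYNC_PROBE);
 * spa_async_dispatch(spa);
 *
 * spa_async_thread() consumes and clears the bits under spa_async_lock, so
 * repeated requests for the same task before the worker runs simply coalesce.
 */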
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG and therefore running this function
* would be considered a no-op as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
bzero(packed + nvsize, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (sav->sav_count == 0) {
VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
sav->sav_count) == 0);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
char *strval, *fname;
zpool_prop_t prop;
const char *propname;
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
case ZPOOL_PROP_INVAL:
/*
* We checked this earlier in spa_prop_validate().
*/
ASSERT(zpool_prop_feature(nvpair_name(elem)));
fname = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", nvpair_name(elem));
break;
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", nvpair_name(elem),
(longlong_t)intval);
} else {
ASSERT(0); /* not allowed */
}
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
default:
break;
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the possibility to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
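/*
 * Worked example (assumed default tunables, shown for illustration only):
 * with zfs_vdev_async_write_max_active = 10 and zfs_vdev_queue_depth_pct =
 * 1000, each eligible metaslab group gets mg_max_alloc_queue_depth =
 * 10 * 1000 / 100 = 100, and each of its allocators starts at
 * zfs_vdev_def_queue_depth (assumed 32). With four eligible top-level vdevs,
 * slots_per_allocator accumulates to 4 * 32 = 128, which then becomes
 * mca_alloc_max_slots for every allocator of the normal, special, and dedup
 * classes.
 */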
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We can not defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while (list_head(&spa->spa_state_dirty_list) != NULL) {
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
+ (void) spa;
int i;
uint64_t spareguid;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&spareguid) == 0 && spareguid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check if a pool has an active shared spare device.
* Note: reference count of an active spare is 2, as a spare and as a replace
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being in-
* progress to not-in-progress so that the thread can wake up and determine
* whether it is finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
break;
fallthrough;
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually, we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
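/*
 * Sketch of the completing side of the protocol documented above (hypothetical
 * caller, shown for illustration only; the real state transitions also sync
 * the new state to disk). A thread finishing an initialize pass would update
 * the in-memory state under the activity-specific lock and then wake waiters:
 *
 * mutex_enter(&vd->vdev_initialize_lock);
 * vd->vdev_initialize_state = VDEV_INITIALIZE_COMPLETE;
 * mutex_exit(&vd->vdev_initialize_lock);
 * spa_notify_waiters(spa);
 *
 * Because spa_notify_waiters() takes spa_activities_lock before broadcasting,
 * a thread blocked in spa_wait_common() cannot miss the wakeup.
 */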
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
+#else
+ (void) spa, (void) vd, (void) hist_nvl, (void) name;
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
+#else
+ (void) ev;
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
"Number of threads per IO worker taskqueue");
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
"Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
"Set the livelist condense synctask to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
index 09f62996853d..ddcdb6801053 100644
--- a/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
+++ b/sys/contrib/openzfs/module/zfs/spa_checkpoint.c
@@ -1,637 +1,637 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017 by Delphix. All rights reserved.
*/
/*
* Storage Pool Checkpoint
*
* A storage pool checkpoint can be thought of as a pool-wide snapshot or
* a stable version of extreme rewind that guarantees no blocks from the
* checkpointed state will have been overwritten. It remembers the entire
* state of the storage pool (e.g. snapshots, dataset names, etc..) from the
* point that it was taken and the user can rewind back to that point even if
* they applied destructive operations on their datasets or even enabled new
* zpool on-disk features. If a pool has a checkpoint that is no longer
* needed, the user can discard it.
*
* == On disk data structures used ==
*
* - The pool has a new feature flag and a new entry in the MOS. The feature
* flag is set to active when we create the checkpoint and remains active
* until the checkpoint is fully discarded. The entry in the MOS config
* (DMU_POOL_ZPOOL_CHECKPOINT) is populated with the uberblock that
* references the state of the pool when we take the checkpoint. The entry
* remains populated until we start discarding the checkpoint or we rewind
* back to it.
*
* - Each vdev contains a vdev-wide space map while the pool has a checkpoint,
* which persists until the checkpoint is fully discarded. The space map
* contains entries that have been freed in the current state of the pool
* but we want to keep around in case we decide to rewind to the checkpoint.
* [see vdev_checkpoint_sm]
*
* - Each metaslab's ms_sm space map behaves the same as without the
* checkpoint, with the only exception being the scenario when we free
* blocks that belong to the checkpoint. In this case, these blocks remain
* ALLOCATED in the metaslab's space map and they are added as FREE in the
* vdev's checkpoint space map.
*
* - Each uberblock has a field (ub_checkpoint_txg) which holds the txg that
* the uberblock was checkpointed. For normal uberblocks this field is 0.
*
* == Overview of operations ==
*
* - To create a checkpoint, we first wait for the current TXG to be synced,
* so we can use the most recently synced uberblock (spa_ubsync) as the
* checkpointed uberblock. Then we use an early synctask to place that
* uberblock in MOS config, increment the feature flag for the checkpoint
* (marking it active), and set spa_checkpoint_txg (see its use below)
* to the TXG of the checkpointed uberblock. We use an early synctask for
* the aforementioned operations to ensure that no blocks were dirtied
* between the current TXG and the TXG of the checkpointed uberblock
* (e.g. the previous txg).
*
* - When a checkpoint exists, we need to ensure that the blocks that
* belong to the checkpoint are freed but never reused. This means that
* these blocks should never end up in the ms_allocatable or the ms_freeing
* trees of a metaslab. Therefore, whenever there is a checkpoint the new
* ms_checkpointing tree is used in addition to the aforementioned ones.
*
* Whenever a block is freed and we find out that it is referenced by the
* checkpoint (we find out by comparing its birth to spa_checkpoint_txg),
* we place it in the ms_checkpointing tree instead of the ms_freeing tree.
* This way, we divide the blocks that are being freed into checkpointed
* and not-checkpointed blocks.
*
* In order to persist these frees, we write the extents from the
* ms_freeing tree to the ms_sm as usual, and the extents from the
* ms_checkpointing tree to the vdev_checkpoint_sm. This way, these
* checkpointed extents will remain allocated in the metaslab's ms_sm space
* map, and therefore won't be reused [see metaslab_sync()]. In addition,
* when we discard the checkpoint, we can find the entries that have
* actually been freed in vdev_checkpoint_sm.
* [see spa_checkpoint_discard_thread_sync()]
*
* - To discard the checkpoint we use an early synctask to delete the
* checkpointed uberblock from the MOS config, set spa_checkpoint_txg to 0,
* and wake up the discarding zthr thread (an open-context async thread).
* We use an early synctask to ensure that the operation happens before any
* new data end up in the checkpoint's data structures.
*
* Once the synctask is done and the discarding zthr is awake, we discard
* the checkpointed data over multiple TXGs by having the zthr prefetching
* entries from vdev_checkpoint_sm and then starting a synctask that places
* them as free blocks into their respective ms_allocatable and ms_sm
* structures.
* [see spa_checkpoint_discard_thread()]
*
* When there are no entries left in the vdev_checkpoint_sm of all
* top-level vdevs, a final synctask runs that decrements the feature flag.
*
* - To rewind to the checkpoint, we first use the current uberblock and
* open the MOS so we can access the checkpointed uberblock from the MOS
* config. After we retrieve the checkpointed uberblock, we use it as the
* current uberblock for the pool by writing it to disk with an updated
* TXG, opening its version of the MOS, and moving on as usual from there.
* [see spa_ld_checkpoint_rewind()]
*
* An important note on rewinding to the checkpoint has to do with how we
* handle ZIL blocks. In the scenario of a rewind, we clear out any ZIL
* blocks that have not been claimed by the time we took the checkpoint
* as they should no longer be valid.
* [see comment in zil_claim()]
*
* == Miscellaneous information ==
*
* - In the hypothetical event that we take a checkpoint, remove a vdev,
* and attempt to rewind, the rewind would fail as the checkpointed
* uberblock would reference data in the removed device. For this reason
* and others of similar nature, we disallow the following operations that
* can change the config:
* vdev removal and attach/detach, mirror splitting, and pool reguid.
*
* - As most of the checkpoint logic is implemented in the SPA and doesn't
* distinguish datasets when it comes to space accounting, having a
* checkpoint can potentially break the boundaries set by dataset
* reservations.
*/
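/*
 * Entry-point sketch (hypothetical call sites, shown for illustration only):
 * the zpool(8) checkpoint and discard subcommands funnel through ioctls into
 * the synctask wrappers defined later in this file, roughly:
 *
 * To take a checkpoint:      error = spa_checkpoint("tank");
 * To start discarding it:    error = spa_checkpoint_discard("tank");
 *
 * Rewinding is different: it is requested at import time (e.g. via
 * zpool import --rewind-to-checkpoint) and handled by
 * spa_ld_checkpoint_rewind() during pool load rather than by a synctask here.
 */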
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/spa_checkpoint.h>
#include <sys/vdev_impl.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
/*
* The following parameter limits the amount of memory to be used for the
* prefetching of the checkpoint space map done on each vdev while
* discarding the checkpoint.
*
* It exists because top-level vdevs with long checkpoint space maps
* can potentially take up a lot of memory depending on the amount of
* checkpointed data that has been freed within them while the pool
* had a checkpoint.
*/
unsigned long zfs_spa_discard_memory_limit = 16 * 1024 * 1024;
int
spa_checkpoint_get_stats(spa_t *spa, pool_checkpoint_stat_t *pcs)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
bzero(pcs, sizeof (pool_checkpoint_stat_t));
int error = zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT)
pcs->pcs_state = CS_CHECKPOINT_DISCARDING;
else
pcs->pcs_state = CS_CHECKPOINT_EXISTS;
pcs->pcs_space = spa->spa_checkpoint_info.sci_dspace;
pcs->pcs_start_time = spa->spa_checkpoint_info.sci_timestamp;
return (0);
}
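/*
* Illustrative sketch (not part of this change): a caller-side view of the
* stats returned above. CS_CHECKPOINT_DISCARDING is reported when the
* feature is still active but the checkpointed uberblock has already been
* removed from the MOS. The function name below is hypothetical.
*/
#if 0  /* example only, never compiled */
static boolean_t
example_checkpoint_is_discarding(spa_t *spa)
{
        pool_checkpoint_stat_t pcs;

        if (spa_checkpoint_get_stats(spa, &pcs) != 0)
                return (B_FALSE);  /* no checkpointed state at all */
        return (pcs.pcs_state == CS_CHECKPOINT_DISCARDING);
}
#endif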
static void
spa_checkpoint_discard_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_t *spa = arg;
spa->spa_checkpoint_info.sci_timestamp = 0;
spa_feature_decr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_notify_waiters(spa);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"finished discarding checkpointed state from the pool");
}
typedef struct spa_checkpoint_discard_sync_callback_arg {
vdev_t *sdc_vd;
uint64_t sdc_txg;
uint64_t sdc_entry_limit;
} spa_checkpoint_discard_sync_callback_arg_t;
static int
spa_checkpoint_discard_sync_callback(space_map_entry_t *sme, void *arg)
{
spa_checkpoint_discard_sync_callback_arg_t *sdc = arg;
vdev_t *vd = sdc->sdc_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
if (sdc->sdc_entry_limit == 0)
return (SET_ERROR(EINTR));
/*
* Since the space map is not condensed, we know that
* none of its entries crosses the boundaries of
* its respective metaslab.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_type, ==, SM_FREE);
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* At this point we should not be processing any
* other frees concurrently, so the lock is technically
* unnecessary. We use the lock anyway though to
* potentially save ourselves from future headaches.
*/
mutex_enter(&ms->ms_lock);
if (range_tree_is_empty(ms->ms_freeing))
vdev_dirty(vd, VDD_METASLAB, ms, sdc->sdc_txg);
range_tree_add(ms->ms_freeing, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
ASSERT3U(vd->vdev_spa->spa_checkpoint_info.sci_dspace, >=,
sme->sme_run);
ASSERT3U(vd->vdev_stat.vs_checkpoint_space, >=, sme->sme_run);
vd->vdev_spa->spa_checkpoint_info.sci_dspace -= sme->sme_run;
vd->vdev_stat.vs_checkpoint_space -= sme->sme_run;
sdc->sdc_entry_limit--;
return (0);
}
#ifdef ZFS_DEBUG
static void
spa_checkpoint_accounting_verify(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t ckpoint_sm_space_sum = 0;
uint64_t vs_ckpoint_space_sum = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (vd->vdev_checkpoint_sm != NULL) {
ckpoint_sm_space_sum +=
-space_map_allocated(vd->vdev_checkpoint_sm);
vs_ckpoint_space_sum +=
vd->vdev_stat.vs_checkpoint_space;
ASSERT3U(ckpoint_sm_space_sum, ==,
vs_ckpoint_space_sum);
} else {
ASSERT0(vd->vdev_stat.vs_checkpoint_space);
}
}
ASSERT3U(spa->spa_checkpoint_info.sci_dspace, ==, ckpoint_sm_space_sum);
}
#endif
static void
spa_checkpoint_discard_thread_sync(void *arg, dmu_tx_t *tx)
{
vdev_t *vd = arg;
int error;
/*
* The space map callback is applied only to non-debug entries.
* Because the number of debug entries is less than or equal to the
* number of non-debug entries, we want to ensure that we only
* read what we prefetched from open-context.
*
* Thus, we cap the number of entries that the space map callback
* will be applied to at half the number of entries that could fit
* within the imposed memory limit.
*
* Note that since this is a conservative estimate we also
* assume the worst case scenario in our computation where each
* entry is two-word.
*/
uint64_t max_entry_limit =
(zfs_spa_discard_memory_limit / (2 * sizeof (uint64_t))) >> 1;
/*
* Iterate from the end of the space map towards the beginning,
* placing its entries on ms_freeing and removing them from the
* space map. The iteration stops if one of the following
* conditions is true:
*
* 1] We reached the beginning of the space map. At this point
* the space map should be completely empty and
* space_map_incremental_destroy should have returned 0.
* The next step would be to free and close the space map
* and remove its entry from its vdev's top zap. This allows
* spa_checkpoint_discard_thread() to move on to the next vdev.
*
* 2] We reached the memory limit (amount of memory used to hold
* space map entries in memory) and space_map_incremental_destroy
* returned EINTR. This means that there are entries remaining
* in the space map that will be cleared in a future invocation
* of this function by spa_checkpoint_discard_thread().
*/
spa_checkpoint_discard_sync_callback_arg_t sdc;
sdc.sdc_vd = vd;
sdc.sdc_txg = tx->tx_txg;
sdc.sdc_entry_limit = max_entry_limit;
uint64_t words_before =
space_map_length(vd->vdev_checkpoint_sm) / sizeof (uint64_t);
error = space_map_incremental_destroy(vd->vdev_checkpoint_sm,
spa_checkpoint_discard_sync_callback, &sdc, tx);
uint64_t words_after =
space_map_length(vd->vdev_checkpoint_sm) / sizeof (uint64_t);
#ifdef ZFS_DEBUG
spa_checkpoint_accounting_verify(vd->vdev_spa);
#endif
zfs_dbgmsg("discarding checkpoint: txg %llu, vdev id %lld, "
"deleted %llu words - %llu words are left",
(u_longlong_t)tx->tx_txg, (longlong_t)vd->vdev_id,
(u_longlong_t)(words_before - words_after),
(u_longlong_t)words_after);
if (error != EINTR) {
if (error != 0) {
zfs_panic_recover("zfs: error %lld was returned "
"while incrementally destroying the checkpoint "
"space map of vdev %u\n",
(longlong_t)error, vd->vdev_id);
}
ASSERT0(words_after);
ASSERT0(space_map_allocated(vd->vdev_checkpoint_sm));
ASSERT0(space_map_length(vd->vdev_checkpoint_sm));
space_map_free(vd->vdev_checkpoint_sm, tx);
space_map_close(vd->vdev_checkpoint_sm);
vd->vdev_checkpoint_sm = NULL;
VERIFY0(zap_remove(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, tx));
}
}
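/*
* Illustrative sketch (not part of this change): the entry limit computed at
* the top of spa_checkpoint_discard_thread_sync() above. With the default
* zfs_spa_discard_memory_limit of 16 MiB and worst-case two-word (16-byte)
* entries, this yields (16777216 / 16) / 2 = 524288 entries per synctask
* pass. The helper name is hypothetical.
*/
#if 0  /* example only, never compiled */
static uint64_t
checkpoint_discard_entry_limit(uint64_t memory_limit)
{
        /* Assume two-word entries, then halve, as described above. */
        return ((memory_limit / (2 * sizeof (uint64_t))) >> 1);
}
#endif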
static boolean_t
spa_checkpoint_discard_is_done(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(!spa_has_checkpoint(spa));
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT));
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (rvd->vdev_child[c]->vdev_checkpoint_sm != NULL)
return (B_FALSE);
ASSERT0(rvd->vdev_child[c]->vdev_stat.vs_checkpoint_space);
}
return (B_TRUE);
}
-/* ARGSUSED */
boolean_t
spa_checkpoint_discard_thread_check(void *arg, zthr_t *zthr)
{
+ (void) zthr;
spa_t *spa = arg;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (B_FALSE);
if (spa_has_checkpoint(spa))
return (B_FALSE);
return (B_TRUE);
}
void
spa_checkpoint_discard_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
while (vd->vdev_checkpoint_sm != NULL) {
space_map_t *checkpoint_sm = vd->vdev_checkpoint_sm;
int numbufs;
dmu_buf_t **dbp;
if (zthr_iscancelled(zthr))
return;
ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
uint64_t size = MIN(space_map_length(checkpoint_sm),
zfs_spa_discard_memory_limit);
uint64_t offset =
space_map_length(checkpoint_sm) - size;
/*
* Ensure that the part of the space map that will
* be destroyed by the synctask is prefetched in
* memory before the synctask runs.
*/
int error = dmu_buf_hold_array_by_bonus(
checkpoint_sm->sm_dbuf, offset, size,
B_TRUE, FTAG, &numbufs, &dbp);
if (error != 0) {
zfs_panic_recover("zfs: error %d was returned "
"while prefetching checkpoint space map "
"entries of vdev %llu\n",
error, vd->vdev_id);
}
VERIFY0(dsl_sync_task(spa->spa_name, NULL,
spa_checkpoint_discard_thread_sync, vd,
0, ZFS_SPACE_CHECK_NONE));
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
}
VERIFY(spa_checkpoint_discard_is_done(spa));
VERIFY0(spa->spa_checkpoint_info.sci_dspace);
VERIFY0(dsl_sync_task(spa->spa_name, NULL,
spa_checkpoint_discard_complete_sync, spa,
0, ZFS_SPACE_CHECK_NONE));
}
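/*
* Illustrative sketch (not part of this change): the prefetch window used by
* spa_checkpoint_discard_thread() above. Destruction proceeds from the end
* of the checkpoint space map toward its beginning, so only the tail of the
* space map, bounded by zfs_spa_discard_memory_limit, is prefetched before
* each synctask. The struct and helper names are hypothetical.
*/
#if 0  /* example only, never compiled */
typedef struct discard_window {
        uint64_t dw_offset;  /* start of the region to prefetch */
        uint64_t dw_size;    /* number of bytes to prefetch */
} discard_window_t;

static discard_window_t
checkpoint_discard_window(uint64_t sm_length, uint64_t memory_limit)
{
        discard_window_t dw;

        dw.dw_size = MIN(sm_length, memory_limit);
        dw.dw_offset = sm_length - dw.dw_size;
        return (dw);
}
#endif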
-/* ARGSUSED */
static int
spa_checkpoint_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_enabled(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ENOTSUP));
if (!spa_top_vdevs_spacemap_addressable(spa))
return (SET_ERROR(ZFS_ERR_VDEV_TOO_BIG));
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(ZFS_ERR_DEVRM_IN_PROGRESS));
if (spa->spa_checkpoint_txg != 0)
return (SET_ERROR(ZFS_ERR_CHECKPOINT_EXISTS));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_DISCARDING_CHECKPOINT));
return (0);
}
-/* ARGSUSED */
static void
spa_checkpoint_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
uberblock_t checkpoint = spa->spa_ubsync;
/*
* At this point, there should not be a checkpoint in the MOS.
*/
ASSERT3U(zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT), ==, ENOENT);
ASSERT0(spa->spa_checkpoint_info.sci_timestamp);
ASSERT0(spa->spa_checkpoint_info.sci_dspace);
/*
* Since the checkpointed uberblock is the one that just got synced
* (we use spa_ubsync), its txg must be equal to the txg number of
* the txg we are syncing, minus 1.
*/
ASSERT3U(checkpoint.ub_txg, ==, spa->spa_syncing_txg - 1);
/*
* Once the checkpoint is in place, we need to ensure that none of
* its blocks will be marked for reuse after it has been freed.
* When there is a checkpoint and a block is freed, we compare its
* birth txg to the txg of the checkpointed uberblock to see if the
* block is part of the checkpoint or not. Therefore, we have to set
* spa_checkpoint_txg before any frees happen in this txg (which is
* why this is done as an early_synctask as explained in the comment
* in spa_checkpoint()).
*/
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
checkpoint.ub_checkpoint_txg = checkpoint.ub_txg;
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT,
sizeof (uint64_t), sizeof (uberblock_t) / sizeof (uint64_t),
&checkpoint, tx));
/*
* Increment the feature refcount and thus activate the feature.
* Note that the feature will be deactivated when we've
* completely discarded all checkpointed state (both vdev
* space maps and uberblock).
*/
spa_feature_incr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_history_log_internal(spa, "spa checkpoint", tx,
"checkpointed uberblock txg=%llu", (u_longlong_t)checkpoint.ub_txg);
}
/*
* Create a checkpoint for the pool.
*/
int
spa_checkpoint(const char *pool)
{
int error;
spa_t *spa;
error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
mutex_enter(&spa->spa_vdev_top_lock);
/*
* Wait for current syncing txg to finish so the latest synced
* uberblock (spa_ubsync) has all the changes that we expect
* to see if we were to revert later to the checkpoint. In other
* words we want the checkpointed uberblock to include/reference
* all the changes that were pending at the time that we issued
* the checkpoint command.
*/
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* As the checkpointed uberblock references blocks from the previous
* txg (spa_ubsync), we want to ensure that we are not freeing any of
* these blocks in the same txg that the following synctask will
* run. Thus, we run it as an early synctask, so the dirty changes
* that are synced to disk afterwards during zios and other synctasks
* do not reuse checkpointed blocks.
*/
error = dsl_early_sync_task(pool, spa_checkpoint_check,
spa_checkpoint_sync, NULL, 0, ZFS_SPACE_CHECK_NORMAL);
mutex_exit(&spa->spa_vdev_top_lock);
spa_close(spa, FTAG);
return (error);
}
-/* ARGSUSED */
static int
spa_checkpoint_discard_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (SET_ERROR(ZFS_ERR_NO_CHECKPOINT));
if (spa->spa_checkpoint_txg == 0)
return (SET_ERROR(ZFS_ERR_DISCARDING_CHECKPOINT));
VERIFY0(zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT));
return (0);
}
-/* ARGSUSED */
static void
spa_checkpoint_discard_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
VERIFY0(zap_remove(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, tx));
spa->spa_checkpoint_txg = 0;
zthr_wakeup(spa->spa_checkpoint_discard_zthr);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"started discarding checkpointed state from the pool");
}
/*
* Discard the checkpoint from a pool.
*/
int
spa_checkpoint_discard(const char *pool)
{
/*
* Similarly to spa_checkpoint(), we want our synctask to run
* before any pending dirty data are written to disk so they
* won't end up in the checkpoint's data structures (e.g.
* ms_checkpointing and vdev_checkpoint_sm) and re-create any
* space maps that the discarding open-context thread has
* deleted.
* [see spa_checkpoint_discard_sync and spa_checkpoint_discard_thread]
*/
return (dsl_early_sync_task(pool, spa_checkpoint_discard_check,
spa_checkpoint_discard_sync, NULL, 0,
ZFS_SPACE_CHECK_DISCARD_CHECKPOINT));
}
EXPORT_SYMBOL(spa_checkpoint_get_stats);
EXPORT_SYMBOL(spa_checkpoint_discard_thread);
EXPORT_SYMBOL(spa_checkpoint_discard_thread_check);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, zfs_spa_, discard_memory_limit, ULONG, ZMOD_RW,
"Limit for memory used in prefetching the checkpoint space map done "
"on each vdev while discarding the checkpoint");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/spa_errlog.c b/sys/contrib/openzfs/module/zfs/spa_errlog.c
index fa5120eb61b3..c6b28ea7d1b8 100644
--- a/sys/contrib/openzfs/module/zfs/spa_errlog.c
+++ b/sys/contrib/openzfs/module/zfs/spa_errlog.c
@@ -1,416 +1,418 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014 by Delphix. All rights reserved.
*/
/*
* Routines to manage the on-disk persistent error log.
*
* Each pool stores a log of all logical data errors seen during normal
* operation. This is actually the union of two distinct logs: the last log,
* and the current log. All errors seen are logged to the current log. When a
* scrub completes, the previous last log is thrown out, the current log
* becomes the new last log, and a fresh current log is initialized. This
* way, if an error is somehow
* corrected, a new scrub will show that it no longer exists, and will be
* deleted from the log when the scrub completes.
*
* The log is stored using a ZAP object whose key is a string form of the
* zbookmark_phys tuple (objset, object, level, blkid), and whose contents is an
* optional 'objset:object' human-readable string describing the data. When an
* error is first logged, this string will be empty, indicating that no name is
* known. This prevents us from having to issue a potentially large amount of
* I/O to discover the object name during an error path. Instead, we do the
* calculation when the data is requested, storing the result so future queries
* will be faster.
*
* This log is then shipped into an nvlist where the key is the dataset name and
* the value is the object name. Userland is then responsible for uniquifying
* this list and displaying it to the user.
*/
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
/*
* Convert a bookmark to a string.
*/
static void
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
{
(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}
/*
* Convert a string to a bookmark
*/
#ifdef _KERNEL
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
zb->zb_objset = zfs_strtonum(buf, &buf);
ASSERT(*buf == ':');
zb->zb_object = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == ':');
zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
ASSERT(*buf == '\0');
}
#endif
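/*
* Illustrative sketch (not part of this change): the errlog ZAP key produced
* by bookmark_to_name() above is four colon-separated hex fields,
* "objset:object:level:blkid", and name_to_bookmark() reverses it. A
* userland-style round trip, assuming only standard C; the function name is
* hypothetical.
*/
#if 0  /* example only, never compiled */
#include <stdio.h>

static void
errlog_key_example(void)
{
        char key[64];
        unsigned long long objset = 0x36, object = 0x1, level = 0, blkid = 0x80;

        /* Encode: produces "36:1:0:80". */
        (void) snprintf(key, sizeof (key), "%llx:%llx:%llx:%llx",
            objset, object, level, blkid);
        /* Decode: parses the four fields back out of the key. */
        (void) sscanf(key, "%llx:%llx:%llx:%llx",
            &objset, &object, &level, &blkid);
}
#endif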
/*
* Log an uncorrectable error to the persistent error log. We add it to the
* spa's list of pending errors. The changes are actually synced out to disk
* during spa_errlog_sync().
*/
void
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb)
{
spa_error_entry_t search;
spa_error_entry_t *new;
avl_tree_t *tree;
avl_index_t where;
/*
* If we are trying to import a pool, ignore any errors, as we won't be
* writing to the pool any time soon.
*/
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
return;
mutex_enter(&spa->spa_errlist_lock);
/*
* If we have had a request to rotate the log, log it to the next list
* instead of the current one.
*/
if (spa->spa_scrub_active || spa->spa_scrub_finished)
tree = &spa->spa_errlist_scrub;
else
tree = &spa->spa_errlist_last;
search.se_bookmark = *zb;
if (avl_find(tree, &search, &where) != NULL) {
mutex_exit(&spa->spa_errlist_lock);
return;
}
new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
new->se_bookmark = *zb;
avl_insert(tree, new, where);
mutex_exit(&spa->spa_errlist_lock);
}
/*
* Return the number of errors currently in the error log. This is actually the
* sum of both the last log and the current log, since we don't know the union
* of these logs until we reach userland.
*/
uint64_t
spa_get_errlog_size(spa_t *spa)
{
uint64_t total = 0, count;
mutex_enter(&spa->spa_errlog_lock);
if (spa->spa_errlog_scrub != 0 &&
zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
&count) == 0)
total += count;
if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
&count) == 0)
total += count;
mutex_exit(&spa->spa_errlog_lock);
mutex_enter(&spa->spa_errlist_lock);
total += avl_numnodes(&spa->spa_errlist_last);
total += avl_numnodes(&spa->spa_errlist_scrub);
mutex_exit(&spa->spa_errlist_lock);
return (total);
}
#ifdef _KERNEL
static int
process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
{
zap_cursor_t zc;
zap_attribute_t za;
zbookmark_phys_t zb;
if (obj == 0)
return (0);
for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
if (*count == 0) {
zap_cursor_fini(&zc);
return (SET_ERROR(ENOMEM));
}
name_to_bookmark(za.za_name, &zb);
if (copyout(&zb, (char *)addr +
(*count - 1) * sizeof (zbookmark_phys_t),
sizeof (zbookmark_phys_t)) != 0) {
zap_cursor_fini(&zc);
return (SET_ERROR(EFAULT));
}
*count -= 1;
}
zap_cursor_fini(&zc);
return (0);
}
static int
process_error_list(avl_tree_t *list, void *addr, size_t *count)
{
spa_error_entry_t *se;
for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
if (*count == 0)
return (SET_ERROR(ENOMEM));
if (copyout(&se->se_bookmark, (char *)addr +
(*count - 1) * sizeof (zbookmark_phys_t),
sizeof (zbookmark_phys_t)) != 0)
return (SET_ERROR(EFAULT));
*count -= 1;
}
return (0);
}
#endif
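/*
* Illustrative sketch (not part of this change): process_error_log() and
* process_error_list() above fill the user buffer from its last slot
* backwards, decrementing *count as they go, so the two on-disk logs and the
* two in-core lists can share a single buffer without tracking a separate
* write position. A simplified model of that pattern; the name is
* hypothetical.
*/
#if 0  /* example only, never compiled */
static int
fill_from_back(uint64_t *buf, size_t *count, const uint64_t *items, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                if (*count == 0)
                        return (-1);  /* ENOMEM in the real code */
                buf[*count - 1] = items[i];
                *count -= 1;
        }
        return (0);
}
#endif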
/*
* Copy all known errors to userland as an array of bookmarks. This is
* actually a union of the on-disk last log and current log, as well as any
* pending error requests.
*
* Because the act of reading the on-disk log could cause errors to be
* generated, we have two separate locks: one for the error log and one for the
* in-core error lists. We only need the error list lock to log an error, so
* we grab the error log lock while we read the on-disk logs, and only pick up
* the error list lock when we are finished.
*/
int
spa_get_errlog(spa_t *spa, void *uaddr, size_t *count)
{
int ret = 0;
#ifdef _KERNEL
mutex_enter(&spa->spa_errlog_lock);
ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);
if (!ret && !spa->spa_scrub_finished)
ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
count);
mutex_enter(&spa->spa_errlist_lock);
if (!ret)
ret = process_error_list(&spa->spa_errlist_scrub, uaddr,
count);
if (!ret)
ret = process_error_list(&spa->spa_errlist_last, uaddr,
count);
mutex_exit(&spa->spa_errlist_lock);
mutex_exit(&spa->spa_errlog_lock);
+#else
+ (void) spa, (void) uaddr, (void) count;
#endif
return (ret);
}
/*
* Called when a scrub completes. This simply sets a bit which tells us which
* AVL tree to add new errors to. spa_errlog_sync() is responsible for actually
* syncing the changes to the underlying objects.
*/
void
spa_errlog_rotate(spa_t *spa)
{
mutex_enter(&spa->spa_errlist_lock);
spa->spa_scrub_finished = B_TRUE;
mutex_exit(&spa->spa_errlist_lock);
}
/*
* Discard any pending errors from the spa_t. Called when unloading a faulted
* pool, as the errors encountered during the open cannot be synced to disk.
*/
void
spa_errlog_drain(spa_t *spa)
{
spa_error_entry_t *se;
void *cookie;
mutex_enter(&spa->spa_errlist_lock);
cookie = NULL;
while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
&cookie)) != NULL)
kmem_free(se, sizeof (spa_error_entry_t));
cookie = NULL;
while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
&cookie)) != NULL)
kmem_free(se, sizeof (spa_error_entry_t));
mutex_exit(&spa->spa_errlist_lock);
}
/*
* Process a list of errors into the current on-disk log.
*/
static void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
spa_error_entry_t *se;
char buf[64];
void *cookie;
if (avl_numnodes(t) != 0) {
/* create log if necessary */
if (*obj == 0)
*obj = zap_create(spa->spa_meta_objset,
DMU_OT_ERROR_LOG, DMU_OT_NONE,
0, tx);
/* add errors to the current log */
for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
char *name = se->se_name ? se->se_name : "";
bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));
(void) zap_update(spa->spa_meta_objset,
*obj, buf, 1, strlen(name) + 1, name, tx);
}
/* purge the error list */
cookie = NULL;
while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(se, sizeof (spa_error_entry_t));
}
}
/*
* Sync the error log out to disk. This is a little tricky because the act of
* writing the error log requires the spa_errlist_lock. So, we need to lock the
* error lists, take a copy of the lists, and then reinitialize them. Then, we
* drop the error list lock and take the error log lock, at which point we
* do the errlog processing. Then, if we encounter an I/O error during this
* process, we can successfully add the error to the list. Note that this will
* result in the perpetual recycling of errors, but it is an unlikely situation
* and not a performance critical operation.
*/
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
dmu_tx_t *tx;
avl_tree_t scrub, last;
int scrub_finished;
mutex_enter(&spa->spa_errlist_lock);
/*
* Bail out early under normal circumstances.
*/
if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
avl_numnodes(&spa->spa_errlist_last) == 0 &&
!spa->spa_scrub_finished) {
mutex_exit(&spa->spa_errlist_lock);
return;
}
spa_get_errlists(spa, &last, &scrub);
scrub_finished = spa->spa_scrub_finished;
spa->spa_scrub_finished = B_FALSE;
mutex_exit(&spa->spa_errlist_lock);
mutex_enter(&spa->spa_errlog_lock);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
/*
* Sync out the current list of errors.
*/
sync_error_list(spa, &last, &spa->spa_errlog_last, tx);
/*
* Rotate the log if necessary.
*/
if (scrub_finished) {
if (spa->spa_errlog_last != 0)
VERIFY(dmu_object_free(spa->spa_meta_objset,
spa->spa_errlog_last, tx) == 0);
spa->spa_errlog_last = spa->spa_errlog_scrub;
spa->spa_errlog_scrub = 0;
sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
}
/*
* Sync out any pending scrub errors.
*/
sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);
/*
* Update the MOS to reflect the new values.
*/
(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
&spa->spa_errlog_last, tx);
(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
&spa->spa_errlog_scrub, tx);
dmu_tx_commit(tx);
mutex_exit(&spa->spa_errlog_lock);
}
#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_get_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index 00515db027d0..bcf6360c25c3 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -1,2966 +1,2968 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>
/*
* SPA locking
*
* There are three basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from non-zero
* - Check if spa_refcount is zero
* - Rename a spa_t
* - add/remove/attach/detach devices
* - Held for the duration of create/destroy/import/export
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to lookup a spa_t by name.
*
* spa_refcount (per-spa zfs_refcount_t protected by mutex)
*
* This reference count keeps track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
* the refcount is never really 'zero' - opening a pool implicitly keeps
* some references in the DMU. Internally we check against spa_minref, but
* present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
* spa_guid_exists() Determine whether a pool/device guid exists.
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
* To read the configuration, it suffices to hold one of these locks as reader.
* To modify the configuration, you must hold all locks as writer. To modify
* vdev state without altering the vdev tree's topology (e.g. online/offline),
* you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
* SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev
* add/remove/attach/detach. Protects the dirty config list
* (spa_config_dirty_list) and the set of spares and l2arc devices.
*
* SCL_STATE
* Protects changes to pool state and vdev state, such as vdev
* online/offline/fault/degrade/clear. Protects the dirty state list
* (spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
* or zio_write_phys() -- the caller must ensure that the config cannot
* change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*/
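/*
* Illustrative sketch (not part of this change): a typical read-side use of
* the config locks described above. Callers name the locks they need as a
* bitmask of SCL_* values, take them as reader, perform the inquiry, and
* release the same set. The function name below is hypothetical.
*/
#if 0  /* example only, never compiled */
static void
example_config_read(spa_t *spa)
{
        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        /* ... inspect the vdev tree without fear of topology changes ... */
        spa_config_exit(spa, SCL_VDEV, FTAG);
}
#endif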
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;
kmem_cache_t *spa_buffer_pool;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;
#ifdef ZFS_DEBUG
/*
* Everything except dprintf, set_error, spa, and indirect_remap is on
* by default in debug builds.
*/
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
*/
int zfs_recover = B_FALSE;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
* The default, "stalling" behavior is useful if the storage partially
* fails (i.e. some but not all i/os fail), and then later recovers. In
* this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
*/
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
* Secondly, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in one of three behaviors controlled by zfs_deadman_failmode.
*/
unsigned long zfs_deadman_synctime_ms = 600000UL;
/*
* This value controls the maximum amount of time zio_wait() will block for an
* outstanding IO. By default this is 300 seconds at which point the "hung"
* behavior will be applied as described for zfs_deadman_synctime_ms.
*/
unsigned long zfs_deadman_ziotime_ms = 300000UL;
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
*/
unsigned long zfs_deadman_checktime_ms = 60000UL;
/*
* By default the deadman is enabled.
*/
int zfs_deadman_enabled = 1;
/*
* Controls the behavior of the deadman when it detects a "hung" I/O.
* Valid values are zfs_deadman_failmode=<wait|continue|panic>.
*
* wait - Wait for the "hung" I/O (default)
* continue - Attempt to recover from a "hung" I/O
* panic - Panic the system
*/
char *zfs_deadman_failmode = "wait";
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
int spa_asize_inflation = 24;
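/*
* Illustrative sketch (not part of this change): the worst case from the
* comment above, spelled out. With VDEV_RAIDZ_MAXPARITY == 3 and
* SPA_DVAS_PER_BP == 3, the product is (3 + 1) * 3 * 2 == 24, matching the
* default above. The helper name is hypothetical.
*/
#if 0  /* example only, never compiled */
static int
asize_inflation_worst_case(void)
{
        return ((VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}
#endif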
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed (bounded by spa_max_slop). This ensures that we
* don't run the pool completely out of space, due to unaccounted changes (e.g.
* to the MOS). It also limits the worst-case time to allocate space. If we
* have less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
* also part of this 3.2% of space which can't be consumed by normal writes;
* the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
* log space.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* Operations that are almost guaranteed to free up space in the absence of
* a pool checkpoint can use up to three quarters of the slop space
* (e.g zfs destroy).
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
* increase in the amount of space used, it is possible to run the pool
* completely out of space, causing it to be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* Further, on very large pools, the slop space will be smaller than
* 3.2%, to avoid reserving much more space than we actually need; bounded
* by spa_max_slop (128GB).
*
* See also the comments in zfs_space_check_t.
*/
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128ULL * 1024 * 1024;
uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
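/*
* Illustrative sketch (not part of this change): the slop sizing described
* above, ignoring the embedded log class adjustment. With spa_slop_shift ==
* 5 (1/32, roughly 3.2%), a 10 TiB pool yields 320 GiB, which is then capped
* at spa_max_slop (128 GiB); a 1 GiB pool yields 32 MiB, which is raised
* toward spa_min_slop (128 MiB) but never beyond half the pool. The real
* logic lives in spa_get_slop_space(); the helper name here is hypothetical.
*/
#if 0  /* example only, never compiled */
static uint64_t
slop_space_model(uint64_t pool_size)
{
        uint64_t slop = pool_size >> spa_slop_shift;

        /* At least spa_min_slop and at most spa_max_slop ... */
        slop = MAX(slop, spa_min_slop);
        slop = MIN(slop, spa_max_slop);
        /* ... but never more than half the pool. */
        return (MIN(slop, pool_size / 2));
}
#endif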
int spa_allocators = 4;
/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
spa->spa_trust_config ? "trusted" : "untrusted", buf);
}
/*
* By default dedup and user data indirects land in the special class
*/
int zfs_ddt_data_is_special = B_TRUE;
int zfs_user_indirect_is_special = B_TRUE;
/*
* The percentage of special class final space reserved for metadata only.
* Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
* let metadata into the class.
*/
int zfs_special_class_metadata_reserve_pct = 25;
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create_untracked(&scl->scl_count);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
zfs_refcount_destroy(&scl->scl_count);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
}
}
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (!zfs_refcount_is_zero(&scl->scl_count)) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
scl->scl_writer = curthread;
}
(void) zfs_refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
return (1);
}
void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
+ (void) tag;
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer || scl->scl_write_wanted) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
ASSERT(scl->scl_writer != curthread);
while (!zfs_refcount_is_zero(&scl->scl_count)) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
(void) zfs_refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
+ (void) tag;
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
cv_broadcast(&scl->scl_cv);
}
mutex_exit(&scl->scl_lock);
}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
int locks_held = 0;
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER &&
!zfs_refcount_is_zero(&scl->scl_count)) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
return (locks_held);
}
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
cp = strpbrk(search.spa_name, "/@#");
if (cp != NULL)
*cp = '\0';
spa = avl_find(&spa_namespace_avl, &search, &where);
return (spa);
}
/*
* Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
* If the zfs_deadman_enabled flag is set then it inspects all vdev queues
* looking for potentially hung I/Os.
*/
void
spa_deadman(void *arg)
{
spa_t *spa = arg;
/* Disable the deadman if the pool is suspended. */
if (spa_suspended(spa))
return;
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
(u_longlong_t)++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
const spa_log_sm_t *a = va;
const spa_log_sm_t *b = vb;
return (TREE_CMP(a->sls_txg, b->sls_txg));
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_state = POOL_STATE_UNINITIALIZED;
spa->spa_freeze_txg = UINT64_MAX;
spa->spa_final_txg = UINT64_MAX;
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
spa->spa_trust_config = B_TRUE;
spa->spa_hostid = zone_get_hostid(NULL);
spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
if (altroot)
spa->spa_root = spa_strdup(altroot);
spa->spa_alloc_count = spa_allocators;
spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
sizeof (spa_alloc_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
NULL);
avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_alloc_node));
}
avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
offsetof(log_summary_entry_t, lse_node));
/*
* Every pool starts with the default cachefile
*/
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
VERIFY(nvlist_dup(features, &spa->spa_label_features,
0) == 0);
}
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
}
if (spa->spa_label_features == NULL) {
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
}
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
spa->spa_min_alloc = INT_MAX;
/* Reset cached value */
spa->spa_dedup_dspace = ~0ULL;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
for (int i = 0; i < SPA_FEATURES; i++) {
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
list_create(&spa->spa_leaf_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_leaf_node));
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
if (spa->spa_root)
spa_strfree(spa->spa_root);
while ((dp = list_head(&spa->spa_config_list)) != NULL) {
list_remove(&spa->spa_config_list, dp);
if (dp->scd_path != NULL)
spa_strfree(dp->scd_path);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
avl_destroy(&spa->spa_allocs[i].spaa_tree);
mutex_destroy(&spa->spa_allocs[i].spaa_lock);
}
kmem_free(spa->spa_allocs, spa->spa_alloc_count *
sizeof (spa_alloc_t));
avl_destroy(&spa->spa_metaslabs_by_flushed);
avl_destroy(&spa->spa_sm_logs_by_txg);
list_destroy(&spa->spa_log_summary);
list_destroy(&spa->spa_config_list);
list_destroy(&spa->spa_leaf_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
zfs_refcount_destroy(&spa->spa_refcount);
spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
zio_checksum_templates_free(spa);
cv_destroy(&spa->spa_async_cv);
cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
cv_destroy(&spa->spa_activities_cv);
cv_destroy(&spa->spa_waiters_cv);
mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
mutex_destroy(&spa->spa_activities_lock);
kmem_free(spa, sizeof (spa_t));
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
else
return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, void *tag)
{
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
* number of references acquired when opening a pool
*/
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
uint64_t aux_guid;
uint64_t aux_pool;
avl_node_t aux_avl;
int aux_count;
} spa_aux_t;
static inline int
spa_aux_compare(const void *a, const void *b)
{
const spa_aux_t *sa = (const spa_aux_t *)a;
const spa_aux_t *sb = (const spa_aux_t *)b;
return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}
static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
spa_aux_t search;
spa_aux_t *aux;
search.aux_guid = vd->vdev_guid;
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
}
}
static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
spa_aux_t *aux;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
aux = avl_find(avl, &search, &where);
ASSERT(aux != NULL);
if (--aux->aux_count == 0) {
avl_remove(avl, aux);
kmem_free(aux, sizeof (spa_aux_t));
} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
aux->aux_pool = 0ULL;
}
}
static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
search.aux_guid = guid;
found = avl_find(avl, &search, NULL);
if (pool) {
if (found)
*pool = found->aux_pool;
else
*pool = 0ULL;
}
if (refcnt) {
if (found)
*refcnt = found->aux_count;
else
*refcnt = 0;
}
return (found != NULL);
}
static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
found = avl_find(avl, &search, &where);
ASSERT(found != NULL);
ASSERT(found->aux_pool == 0ULL);
found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
* pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(!vd->vdev_isspare);
spa_aux_add(vd, &spa_spare_avl);
vd->vdev_isspare = B_TRUE;
mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_remove(vd, &spa_spare_avl);
vd->vdev_isspare = B_FALSE;
mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
boolean_t found;
mutex_enter(&spa_spare_lock);
found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_activate(vd, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
}
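/*
 * Illustrative sketch (for exposition only, not compiled): how the
 * refcounted spare AVL tree above is typically exercised. The function
 * name spa_spare_example() is hypothetical.
 */
#if 0
static void
spa_spare_example(vdev_t *vd)
{
uint64_t pool;
int refcnt;
spa_spare_add(vd); /* shared refcount bumped, spare inactive */
spa_spare_activate(vd); /* bind the spare to vd's pool */
if (spa_spare_exists(vd->vdev_guid, &pool, &refcnt))
ASSERT3U(pool, ==, spa_guid(vd->vdev_spa));
spa_spare_remove(vd); /* drop this pool's reference */
}
#endif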
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
* Cache devices currently only support one pool per cache device, and so
* for these devices the aux reference count is currently unused beyond 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(!vd->vdev_isl2cache);
spa_aux_add(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_TRUE;
mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_remove(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_FALSE;
mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
boolean_t found;
mutex_enter(&spa_l2cache_lock);
found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_activate(vd, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
return (spa_vdev_config_enter(spa));
}
/*
* The same as spa_vdev_enter() above but additionally takes the guid of
* the vdev being detached. When there is a rebuild in process it will be
* suspended while the vdev tree is modified then resumed by spa_vdev_exit().
* The rebuild is canceled if only a single child remains after the detach.
*/
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
vdev_autotrim_stop_all(spa);
if (guid != 0) {
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd) {
vdev_rebuild_stop_wait(vd->vdev_top);
}
}
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
* operation requires multiple syncs (i.e. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
int config_changed = B_FALSE;
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
/*
* Reassess the DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
}
/*
* Verify the metaslab classes.
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
/*
* Panic the system if the specified tag requires it. This
* is useful for ensuring that configurations are updated
* transactionally.
*/
if (zio_injection_enabled)
zio_handle_panic_injection(spa, tag, 0);
/*
* Note: this txg_wait_synced() is important because it ensures
* that there won't be more than one config change per txg.
* This allows us to use the txg as the generation number.
*/
if (error == 0)
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
if (vd->vdev_ops->vdev_op_leaf) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
NULL);
mutex_exit(&vd->vdev_initialize_lock);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
}
/*
* The vdev may be both a leaf and top-level device.
*/
vdev_autotrim_stop_wait(vd);
spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_STATE_ALL, spa);
}
/*
* If the config changed, update the config cache.
*/
if (config_changed)
spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
* locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
vdev_autotrim_restart(spa);
vdev_rebuild_restart(spa);
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
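/*
 * Illustrative sketch (for exposition only, not compiled): the usual
 * pairing of spa_vdev_enter()/spa_vdev_exit() by callers that modify
 * the vdev tree. spa_example_reconfigure() is a hypothetical caller.
 */
#if 0
static int
spa_example_reconfigure(spa_t *spa)
{
int error = 0;
/* Take spa_namespace_lock plus SCL_ALL and get the next txg. */
uint64_t txg = spa_vdev_enter(spa);
/* ... add or remove vdevs here, setting 'error' on failure ... */
/* Drop the locks, wait for the txg to sync, update the cachefile. */
return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif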
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
int locks = SCL_STATE_ALL | oplocks;
/*
* Root pools may need to read from the underlying devfs filesystem
* when opening up a vdev. Unfortunately if we're holding the
* SCL_ZIO lock it will result in a deadlock when we try to issue
* the read from the root filesystem. Instead we "prefetch"
* the associated vnodes that we need prior to opening the
* underlying devices and cache them so that we can prevent
* any I/O when we are doing the actual open.
*/
if (spa_is_root(spa)) {
int low = locks & ~(SCL_ZIO - 1);
int high = locks & ~low;
spa_config_enter(spa, high, spa, RW_WRITER);
vdev_hold(spa->spa_root_vdev);
spa_config_enter(spa, low, spa, RW_WRITER);
} else {
spa_config_enter(spa, locks, spa, RW_WRITER);
}
spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
vdev_t *vdev_top;
if (vd == NULL || vd == spa->spa_root_vdev) {
vdev_top = spa->spa_root_vdev;
} else {
vdev_top = vd->vdev_top;
}
if (vd != NULL || error == 0)
vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);
if (vd != NULL) {
if (vd != spa->spa_root_vdev)
vdev_state_dirty(vdev_top);
config_changed = B_TRUE;
spa->spa_config_generation++;
}
if (spa_is_root(spa))
vdev_rele(spa->spa_root_vdev);
ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
spa_config_exit(spa, spa->spa_vdev_locks, spa);
/*
* If anything changed, wait for it to sync. This ensures that,
* from the system administrator's perspective, zpool(8) commands
* are synchronous. This is important for things like zpool offline:
* when the command completes, you expect no further I/O from ZFS.
*/
if (vd != NULL)
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
mutex_exit(&spa_namespace_lock);
}
return (error);
}
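/*
 * Illustrative sketch (for exposition only, not compiled): the matching
 * pattern for vdev state changes, which takes only the SCL_STATE locks
 * (plus any caller-requested op locks) rather than SCL_ALL. The function
 * name spa_example_set_state() is hypothetical.
 */
#if 0
static int
spa_example_set_state(spa_t *spa, vdev_t *vd)
{
spa_vdev_state_enter(spa, SCL_NONE);
/* ... change vd's state (online/offline/fault) here ... */
return (spa_vdev_state_exit(spa, vd, 0));
}
#endif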
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
if (!nvlist_exists(spa->spa_label_features, feature)) {
fnvlist_add_boolean(spa->spa_label_features, feature);
/*
* When we are creating the pool (tx_txg==TXG_INITIAL), we can't
* dirty the vdev config because the SCL_CONFIG lock is not held.
* Thankfully, in this case we don't need to dirty the config
* because it will be written out anyway when we finish
* creating the pool.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
continue;
if (spa->spa_root_vdev == NULL)
continue;
if (spa_guid(spa) == pool_guid) {
if (device_guid == 0)
break;
if (vdev_lookup_by_guid(spa->spa_root_vdev,
device_guid) != NULL)
break;
/*
* Check any devices we may be in the process of adding.
*/
if (spa->spa_pending_vdev) {
if (vdev_lookup_by_guid(spa->spa_pending_vdev,
device_guid) != NULL)
break;
}
}
}
return (spa);
}
/*
* Determine whether a pool with the given pool_guid exists.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
size_t len;
char *new;
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
bcopy(s, new, len);
new[len] = '\0';
return (new);
}
void
spa_strfree(char *s)
{
kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_generate_guid(spa_t *spa)
{
uint64_t guid;
if (spa != NULL) {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
} else {
do {
(void) random_get_pseudo_bytes((void *)&guid,
sizeof (guid));
} while (guid == 0 || spa_guid_exists(guid, 0));
}
return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
char *checksum = NULL;
char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
(void) snprintf(type, sizeof (type), "bswap %s %s",
DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
"metadata" : "data",
dmu_ot_byteswap[bswap].ob_name);
} else {
(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
sizeof (type));
}
if (!BP_IS_EMBEDDED(bp)) {
checksum =
zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
}
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
compress);
}
void
spa_freeze(spa_t *spa)
{
uint64_t freeze_txg = 0;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (spa->spa_freeze_txg == UINT64_MAX) {
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
spa->spa_freeze_txg = freeze_txg;
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (freeze_txg != 0)
txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
int digit;
while ((c = *str) != '\0') {
if (c >= '0' && c <= '9')
digit = c - '0';
else if (c >= 'a' && c <= 'f')
digit = 10 + c - 'a';
else
break;
val *= 16;
val += digit;
str++;
}
if (nptr)
*nptr = (char *)str;
return (val);
}
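/*
 * Worked example (for exposition only, not compiled): parsing stops at
 * the first non-hex character and 'nptr' is left pointing at it.
 */
#if 0
static void
zfs_strtonum_example(void)
{
char *end;
/* "1a2b" is 0x1a2b == 6699; parsing stops at the 'x'. */
uint64_t val = zfs_strtonum("1a2bxyz", &end);
ASSERT3U(val, ==, 6699);
ASSERT3S(*end, ==, 'x');
}
#endif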
void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
/*
* We bump the feature refcount for each special vdev added to the pool.
*/
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
return (spa->spa_is_initializing);
}
boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
return (spa->spa_indirect_vdevs_loaded);
}
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
if (spa->spa_root == NULL)
buf[0] = '\0';
else
(void) strncpy(buf, spa->spa_root, buflen);
}
int
spa_sync_pass(spa_t *spa)
{
return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t guid;
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
* vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
if (spa->spa_root_vdev == NULL)
return (spa->spa_config_guid);
guid = spa->spa_last_synced_guid != 0 ?
spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
/*
* Return the most recently synced out guid unless we're
* in syncing context.
*/
if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
/*
* This is a GUID that exists solely as a reference for the
* purposes of the arc. It is generated at load time, and
* is never written to persistent storage.
*/
return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
return (spa->spa_syncing_txg);
}
/*
* Return the last txg where data can be dirtied. The final txgs
* will be used to just clear out any deferred frees that remain.
*/
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
return (spa->spa_final_txg - TXG_DEFER_SIZE);
}
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
/*
* Return the inflated asize for a logical write in bytes. This is used by the
* DMU to calculate the space a logical write will require on disk.
* If lsize is smaller than the largest physical block size allocatable on this
* pool we use its value instead, since the write will end up using the whole
* block anyway.
*/
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
if (lsize == 0)
return (0); /* No inflation needed */
return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
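/*
 * Worked example (illustrative, assuming the default spa_asize_inflation
 * of 24): on a pool whose largest ashift is 12 (4K sectors), a 512-byte
 * logical write is charged MAX(512, 4096) * 24 = 96K of worst-case space,
 * and a 128K logical write is charged 128K * 24 = 3M.
 */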
/*
* Return the amount of slop space in bytes. It is typically 1/32 of the pool
* (3.2%), minus the embedded log space. On very small pools, it may be
* slightly larger than this. On very large pools, it will be capped to
* the value of spa_max_slop. The embedded log space is not included in
* spa_dspace. By subtracting it, the usable space (per "zfs list") is a
* constant 97% of the total space, regardless of metaslab size (assuming the
* default spa_slop_shift=5 and a non-tiny pool).
*
* See the comment above spa_slop_shift for more details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = 0;
uint64_t slop = 0;
/*
* Make sure spa_dedup_dspace has been set.
*/
if (spa->spa_dedup_dspace == ~0ULL)
spa_update_dspace(spa);
/*
* spa_get_dspace() includes the space only logically "used" by
* deduplicated data, and since it's not useful to reserve more
* slop space for more deduplicated data, we subtract that out here.
*/
space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
slop = MIN(space >> spa_slop_shift, spa_max_slop);
/*
* Subtract the embedded log space, but no more than half the (3.2%)
* unusable space. Note, the "no more than half" is only relevant if
* zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
* default.
*/
uint64_t embedded_log =
metaslab_class_get_dspace(spa_embedded_log_class(spa));
slop -= MIN(embedded_log, slop >> 1);
/*
* Slop space should be at least spa_min_slop, but no more than half
* the entire pool.
*/
slop = MAX(slop, MIN(space >> 1, spa_min_slop));
return (slop);
}
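/*
 * Worked example (illustrative, assuming the default spa_slop_shift of 5
 * and, at the time of writing, spa_min_slop of 128M and spa_max_slop of
 * 128G): a 1T pool with no dedup and no embedded log reserves
 * 1T >> 5 = 32G of slop; a 16T pool would compute 512G but is capped at
 * 128G; a 2G pool computes 64M but is raised to the 128M minimum
 * (bounded by half the pool).
 */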
uint64_t
spa_get_dspace(spa_t *spa)
{
return (spa->spa_dspace);
}
uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
return (spa->spa_checkpoint_info.sci_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa);
if (spa->spa_vdev_removal != NULL) {
/*
* We can't allocate from the removing device, so subtract
* its size if it was included in dspace (i.e. if this is a
* normal-class vdev, not special/dedup). This prevents the
* DMU/DSL from filling up the (now smaller) pool while we
* are in the middle of removing the device.
*
* Note that the DMU/DSL doesn't actually know or care
* how much space is allocated (it does its own tracking
* of how much space has been logically used). So it
* doesn't matter that the data we are moving may be
* allocated twice (on the old device and the new
* device).
*/
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vdev_t *vd =
vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
/*
* If the stars align, we can wind up here after
* vdev_remove_complete() has cleared vd->vdev_mg but before
* spa->spa_vdev_removal gets cleared, so we must check before
* we dereference.
*/
if (vd->vdev_mg &&
vd->vdev_mg->mg_class == spa_normal_class(spa)) {
spa->spa_dspace -= spa_deflate(spa) ?
vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
}
/*
* Return the failure mode that has been set for this pool. The default
* behavior will be to block all I/Os when a complete failure occurs.
*/
uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
spa_version(spa_t *spa)
{
return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
return (spa->spa_log_class);
}
metaslab_class_t *
spa_embedded_log_class(spa_t *spa)
{
return (spa->spa_embedded_log_class);
}
metaslab_class_t *
spa_special_class(spa_t *spa)
{
return (spa->spa_special_class);
}
metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
return (spa->spa_dedup_class);
}
/*
* Locate an appropriate allocation class
*/
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
uint_t level, uint_t special_smallblk)
{
/*
* ZIL allocations determine their class in zio_alloc_zil().
*/
ASSERT(objtype != DMU_OT_INTENT_LOG);
boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
if (DMU_OT_IS_DDT(objtype)) {
if (spa->spa_dedup_class->mc_groups != 0)
return (spa_dedup_class(spa));
else if (has_special_class && zfs_ddt_data_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/* Indirect blocks for user data can land in special if allowed */
if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
if (has_special_class && zfs_user_indirect_is_special)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
if (DMU_OT_IS_METADATA(objtype) || level > 0) {
if (has_special_class)
return (spa_special_class(spa));
else
return (spa_normal_class(spa));
}
/*
* Allow small file blocks in special class in some cases (like
* for the dRAID vdev feature). But always leave a reserve of
* zfs_special_class_metadata_reserve_pct exclusively for metadata.
*/
if (DMU_OT_IS_FILE(objtype) &&
has_special_class && size <= special_smallblk) {
metaslab_class_t *special = spa_special_class(spa);
uint64_t alloc = metaslab_class_get_alloc(special);
uint64_t space = metaslab_class_get_space(special);
uint64_t limit =
(space * (100 - zfs_special_class_metadata_reserve_pct))
/ 100;
if (alloc < limit)
return (special);
}
return (spa_normal_class(spa));
}
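/*
 * Illustrative examples of the selection above (assuming both a special
 * and a dedup class have vdevs, and default tunables):
 *   - a DDT block                                  -> dedup class
 *   - a level-1 indirect block of a file           -> special class
 *   - a dnode or other metadata block              -> special class
 *   - a 4K file data block with special_smallblk = 32K and the special
 *     class still under its metadata reserve       -> special class
 *   - a 128K file data block (> special_smallblk)  -> normal class
 */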
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_insert_head(&spa->spa_evicting_os_list, os);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_remove(&spa->spa_evicting_os_list, os);
cv_broadcast(&spa->spa_evicting_os_cv);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_wait(spa_t *spa)
{
mutex_enter(&spa->spa_evicting_os_lock);
while (!list_is_empty(&spa->spa_evicting_os_list))
cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
mutex_exit(&spa->spa_evicting_os_lock);
dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
/*
* As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
* handle BPs with more than one DVA allocated. Set our max
* replication level accordingly.
*/
if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
return (1);
return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
return (spa->spa_deadman_synctime);
}
spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
return (spa->spa_autotrim);
}
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
return (spa->spa_deadman_ziotime);
}
uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
return (spa->spa_deadman_failmode);
}
void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
if (strcmp(failmode, "wait") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
else if (strcmp(failmode, "continue") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
else if (strcmp(failmode, "panic") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
else
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
void
spa_set_deadman_ziotime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_ziotime = ns;
mutex_exit(&spa_namespace_lock);
}
}
void
spa_set_deadman_synctime(hrtime_t ns)
{
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_synctime = ns;
mutex_exit(&spa_namespace_lock);
}
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
uint64_t asize = DVA_GET_ASIZE(dva);
uint64_t dsize = asize;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd != NULL)
dsize = (asize >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
}
return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
uint64_t
spa_dirty_data(spa_t *spa)
{
return (spa->spa_dsl_pool->dp_dirty_total);
}
/*
* ==========================================================================
* SPA Import Progress Routines
* ==========================================================================
*/
typedef struct spa_import_progress {
uint64_t pool_guid; /* unique id for updates */
char *pool_name;
spa_load_state_t spa_load_state;
uint64_t mmp_sec_remaining; /* MMP activity check */
uint64_t spa_load_max_txg; /* rewind txg */
procfs_list_node_t smh_node;
} spa_import_progress_t;
spa_history_list_t *spa_import_progress_list = NULL;
static int
spa_import_progress_show_header(struct seq_file *f)
{
seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid",
"load_state", "multihost_secs", "max_txg",
"pool_name");
return (0);
}
static int
spa_import_progress_show(struct seq_file *f, void *data)
{
spa_import_progress_t *sip = (spa_import_progress_t *)data;
seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n",
(u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
(u_longlong_t)sip->mmp_sec_remaining,
(u_longlong_t)sip->spa_load_max_txg,
(sip->pool_name ? sip->pool_name : "-"));
return (0);
}
/* Remove oldest elements from list until there are no more than 'size' left */
static void
spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size)
{
spa_import_progress_t *sip;
while (shl->size > size) {
sip = list_remove_head(&shl->procfs_list.pl_list);
if (sip->pool_name)
spa_strfree(sip->pool_name);
kmem_free(sip, sizeof (spa_import_progress_t));
shl->size--;
}
IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
}
static void
spa_import_progress_init(void)
{
spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t),
KM_SLEEP);
spa_import_progress_list->size = 0;
spa_import_progress_list->procfs_list.pl_private =
spa_import_progress_list;
procfs_list_install("zfs",
NULL,
"import_progress",
0644,
&spa_import_progress_list->procfs_list,
spa_import_progress_show,
spa_import_progress_show_header,
NULL,
offsetof(spa_import_progress_t, smh_node));
}
static void
spa_import_progress_destroy(void)
{
spa_history_list_t *shl = spa_import_progress_list;
procfs_list_uninstall(&shl->procfs_list);
spa_import_progress_truncate(shl, 0);
procfs_list_destroy(&shl->procfs_list);
kmem_free(shl, sizeof (spa_history_list_t));
}
int
spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t load_state)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_state = load_state;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->spa_load_max_txg = load_max_txg;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
int
spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
int error = ENOENT;
if (shl->size == 0)
return (0);
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
sip->mmp_sec_remaining = mmp_sec_remaining;
error = 0;
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
return (error);
}
/*
* A new import is in progress; add an entry for it.
*/
void
spa_import_progress_add(spa_t *spa)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
char *poolname = NULL;
sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP);
sip->pool_guid = spa_guid(spa);
(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
&poolname);
if (poolname == NULL)
poolname = spa_name(spa);
sip->pool_name = spa_strdup(poolname);
sip->spa_load_state = spa_load_state(spa);
mutex_enter(&shl->procfs_list.pl_lock);
procfs_list_add(&shl->procfs_list, sip);
shl->size++;
mutex_exit(&shl->procfs_list.pl_lock);
}
void
spa_import_progress_remove(uint64_t pool_guid)
{
spa_history_list_t *shl = spa_import_progress_list;
spa_import_progress_t *sip;
mutex_enter(&shl->procfs_list.pl_lock);
for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
sip = list_prev(&shl->procfs_list.pl_list, sip)) {
if (sip->pool_guid == pool_guid) {
if (sip->pool_name)
spa_strfree(sip->pool_name);
list_remove(&shl->procfs_list.pl_list, sip);
shl->size--;
kmem_free(sip, sizeof (spa_import_progress_t));
break;
}
}
mutex_exit(&shl->procfs_list.pl_lock);
}
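/*
 * Illustrative sketch (for exposition only, not compiled): the lifecycle
 * of an import-progress entry as driven by the import code. The function
 * name spa_import_progress_example() is hypothetical.
 */
#if 0
static void
spa_import_progress_example(spa_t *spa)
{
spa_import_progress_add(spa); /* new entry */
(void) spa_import_progress_set_state(spa_guid(spa), SPA_LOAD_IMPORT);
(void) spa_import_progress_set_max_txg(spa_guid(spa),
spa_last_synced_txg(spa));
spa_import_progress_remove(spa_guid(spa)); /* done */
}
#endif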
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
const spa_t *s1 = a1;
const spa_t *s2 = a2;
int s;
s = strcmp(s1->spa_name, s2->spa_name);
return (TREE_ISIGN(s));
}
void
spa_boot_init(void)
{
spa_config_load();
}
void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
offsetof(spa_t, spa_avl));
avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
spa_mode_global = mode;
#ifndef _KERNEL
if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) {
struct sigaction sa;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sa.sa_sigaction = arc_buf_sigsegv;
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
perror("could not enable watchpoints: "
"sigaction(SIGSEGV, ...) = ");
} else {
arc_watch = B_TRUE;
}
}
#endif
fm_init();
zfs_refcount_init();
unique_init();
zfs_btree_init();
metaslab_stat_init();
ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_cache_stat_init();
vdev_mirror_stat_init();
vdev_raidz_math_init();
vdev_file_init();
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
spa_config_load();
l2arc_start();
scan_init();
qat_init();
spa_import_progress_init();
}
void
spa_fini(void)
{
l2arc_stop();
spa_evict_all();
vdev_file_fini();
vdev_cache_stat_fini();
vdev_mirror_stat_fini();
vdev_raidz_math_fini();
zil_fini();
dmu_fini();
zio_fini();
ddt_fini();
metaslab_stat_fini();
zfs_btree_fini();
unique_fini();
zfs_refcount_fini();
fm_fini();
scan_fini();
qat_fini();
spa_import_progress_destroy();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
avl_destroy(&spa_l2cache_avl);
cv_destroy(&spa_namespace_cv);
mutex_destroy(&spa_namespace_lock);
mutex_destroy(&spa_spare_lock);
mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has a dedicated slog device. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)
{
return (spa->spa_log_class->mc_groups != 0);
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
!txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}
spa_mode_t
spa_mode(spa_t *spa)
{
return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
return (spa->spa_dedup_checksum);
}
/*
* Reset pool scan stat per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_scrub_pause = 0;
spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
spa->spa_scan_pass_issued = 0;
vdev_scan_stat_init(spa->spa_root_vdev);
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_examined = scn->scn_phys.scn_examined;
ps->pss_to_process = scn->scn_phys.scn_to_process;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
/* data not stored on disk */
ps->pss_pass_exam = spa->spa_scan_pass_exam;
ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
ps->pss_pass_issued = spa->spa_scan_pass_issued;
ps->pss_issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
return (0);
}
int
spa_maxblocksize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SPA_MAXBLOCKSIZE);
else
return (SPA_OLD_MAXBLOCKSIZE);
}
/*
* Returns the txg in which the last device removal completed. No indirect mappings
* have been added since this txg.
*/
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
uint64_t vdevid;
uint64_t ret = -1ULL;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/*
* sr_prev_indirect_vdev is only modified while holding all the
* config locks, so it is sufficient to hold SCL_VDEV as reader when
* examining it.
*/
vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
while (vdevid != -1ULL) {
vdev_t *vd = vdev_lookup_top(spa, vdevid);
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
/*
* If the removal did not remap any data, we don't care.
*/
if (vdev_indirect_births_count(vib) != 0) {
ret = vdev_indirect_births_last_entry_txg(vib);
break;
}
vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_VDEV, FTAG);
IMPLY(ret != -1ULL,
spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
return (ret);
}
int
spa_maxdnodesize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (DNODE_MAX_SIZE);
else
return (DNODE_MIN_SIZE);
}
boolean_t
spa_multihost(spa_t *spa)
{
return (spa->spa_multihost ? B_TRUE : B_FALSE);
}
uint32_t
spa_get_hostid(spa_t *spa)
{
return (spa->spa_hostid);
}
boolean_t
spa_trust_config(spa_t *spa)
{
return (spa->spa_trust_config);
}
uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
return (spa->spa_missing_tvds_allowed);
}
space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
return (spa->spa_syncing_log_sm);
}
void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
spa->spa_missing_tvds = missing;
}
/*
* Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc).
*/
const char *
spa_state_to_name(spa_t *spa)
{
ASSERT3P(spa, !=, NULL);
/*
* It is possible for the spa to exist without a root vdev while it
* transitions during import/export.
*/
vdev_t *rvd = spa->spa_root_vdev;
if (rvd == NULL) {
return ("TRANSITIONING");
}
vdev_state_t state = rvd->vdev_state;
vdev_aux_t aux = rvd->vdev_stat.vs_aux;
if (spa_suspended(spa) &&
(spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE))
return ("SUSPENDED");
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return ("OFFLINE");
case VDEV_STATE_REMOVED:
return ("REMOVED");
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return ("FAULTED");
else if (aux == VDEV_AUX_SPLIT_POOL)
return ("SPLIT");
else
return ("UNAVAIL");
case VDEV_STATE_FAULTED:
return ("FAULTED");
case VDEV_STATE_DEGRADED:
return ("DEGRADED");
case VDEV_STATE_HEALTHY:
return ("ONLINE");
default:
break;
}
return ("UNKNOWN");
}
boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
spa_has_checkpoint(spa_t *spa)
{
return (spa->spa_checkpoint_txg != 0);
}
boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
spa->spa_mode == SPA_MODE_READ);
}
uint64_t
spa_min_claim_txg(spa_t *spa)
{
uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
if (checkpoint_txg != 0)
return (checkpoint_txg + 1);
return (spa->spa_first_txg);
}
/*
* If there is a checkpoint, async destroys may consume more space from
* the pool instead of freeing it. In an attempt to save the pool from
* getting suspended when it is about to run out of space, we stop
* processing async destroys.
*/
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t unreserved = dsl_pool_unreserved_space(dp,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
if (spa_has_checkpoint(spa) && avail == 0)
return (B_TRUE);
return (B_FALSE);
}
#if defined(_KERNEL)
int
param_set_deadman_failmode_common(const char *val)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
strcmp(val, "panic"))
return (SET_ERROR(EINVAL));
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
}
return (0);
}
#endif
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);
/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);
/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);
/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);
/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);
/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_special_class);
EXPORT_SYMBOL(spa_preferred_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
"Set additional debugging flags");
ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
"Set to attempt to recover from fatal errors");
ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
"Set to ignore IO errors during free and permanently leak the space");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW,
"Dead I/O check interval in milliseconds");
ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
"Enable deadman timer");
ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW,
"SPA size estimate multiplication factor");
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
"Place DDT data into the special class");
ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
"Place user data indirect blocks into the special class");
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
param_set_deadman_failmode, param_get_charp, ZMOD_RW,
"Failmode for deadman timer");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
param_set_deadman_synctime, param_get_ulong, ZMOD_RW,
"Pool sync expiration time in milliseconds");
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
"IO expiration time in milliseconds");
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW,
"Small file blocks in special vdevs depends on this much "
"free space available");
/* END CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
param_get_int, ZMOD_RW, "Reserved free space in pool");
diff --git a/sys/contrib/openzfs/module/zfs/vdev.c b/sys/contrib/openzfs/module/zfs/vdev.c
index d659ec5bf333..57dcfe72339d 100644
--- a/sys/contrib/openzfs/module/zfs/vdev.c
+++ b/sys/contrib/openzfs/module/zfs/vdev.c
@@ -1,5450 +1,5457 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Datto Inc. All rights reserved.
* Copyright [2021] Hewlett Packard Enterprise Development LP
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_raidz.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
/*
* One metaslab from each (normal-class) vdev is used by the ZIL. These are
* called "embedded slog metaslabs", are referenced by vdev_log_mg, and are
* part of the spa_embedded_log_class. The metaslab with the most free space
* in each vdev is selected for this purpose when the pool is opened (or a
* vdev is added). See vdev_metaslab_init().
*
* Log blocks can be allocated from the following locations. Each one is tried
* in order until the allocation succeeds:
* 1. dedicated log vdevs, aka "slog" (spa_log_class)
* 2. embedded slog metaslabs (spa_embedded_log_class)
* 3. other metaslabs in normal vdevs (spa_normal_class)
*
* zfs_embedded_slog_min_ms disables the embedded slog if there are fewer
* than this number of metaslabs in the vdev. This ensures that we don't set
* aside an unreasonable amount of space for the ZIL. If set to less than
* 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
* (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
*/
int zfs_embedded_slog_min_ms = 64;
/* default target for number of metaslabs per top-level vdev */
int zfs_vdev_default_ms_count = 200;
/* minimum number of metaslabs per top-level vdev */
int zfs_vdev_min_ms_count = 16;
/* practical upper limit of total metaslabs per top-level vdev */
int zfs_vdev_ms_count_limit = 1ULL << 17;
/* lower limit for metaslab size (512M) */
int zfs_vdev_default_ms_shift = 29;
/* upper limit for metaslab size (16G) */
int zfs_vdev_max_ms_shift = 34;
int vdev_validate_skip = B_FALSE;
/*
* Since the DTL space map of a vdev is not expected to have a lot of
* entries, we default its block size to 4K.
*/
int zfs_vdev_dtl_sm_blksz = (1 << 12);
/*
* Rate limit slow IO (delay) events to this many per second.
*/
unsigned int zfs_slow_io_events_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
unsigned int zfs_checksum_events_per_second = 20;
/*
* Ignore errors during scrub/resilver. This allows working around a
* resilver triggered upon import when there are pool errors.
*/
int zfs_scan_ignore_errors = 0;
/*
* vdev-wide space maps that have lots of entries written to them at
* the end of each transaction can benefit from a higher I/O bandwidth
* (e.g. vdev_obsolete_sm), thus we default their block size to 128K.
*/
int zfs_vdev_standard_sm_blksz = (1 << 17);
/*
* Tunable parameter for debugging or performance analysis. Setting this
* will cause pool corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zfs_nocacheflush = 0;
uint64_t zfs_vdev_max_auto_ashift = ASHIFT_MAX;
uint64_t zfs_vdev_min_auto_ashift = ASHIFT_MIN;
/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
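/*
 * Usage example (for exposition only, not compiled): vdev_dbgmsg() takes
 * printf-style arguments and prefixes them with the vdev's type and path
 * (or id/guid when no path is set).
 */
#if 0
vdev_dbgmsg(vd, "opened with ashift=%llu",
(u_longlong_t)vd->vdev_ashift);
#endif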
void
vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
{
char state[20];
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) {
zfs_dbgmsg("%*svdev %llu: %s", indent, "",
(u_longlong_t)vd->vdev_id,
vd->vdev_ops->vdev_op_type);
return;
}
switch (vd->vdev_state) {
case VDEV_STATE_UNKNOWN:
(void) snprintf(state, sizeof (state), "unknown");
break;
case VDEV_STATE_CLOSED:
(void) snprintf(state, sizeof (state), "closed");
break;
case VDEV_STATE_OFFLINE:
(void) snprintf(state, sizeof (state), "offline");
break;
case VDEV_STATE_REMOVED:
(void) snprintf(state, sizeof (state), "removed");
break;
case VDEV_STATE_CANT_OPEN:
(void) snprintf(state, sizeof (state), "can't open");
break;
case VDEV_STATE_FAULTED:
(void) snprintf(state, sizeof (state), "faulted");
break;
case VDEV_STATE_DEGRADED:
(void) snprintf(state, sizeof (state), "degraded");
break;
case VDEV_STATE_HEALTHY:
(void) snprintf(state, sizeof (state), "healthy");
break;
default:
(void) snprintf(state, sizeof (state), "<state %u>",
(uint_t)vd->vdev_state);
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
"", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
for (uint64_t i = 0; i < vd->vdev_children; i++)
vdev_dbgmsg_print_tree(vd->vdev_child[i], indent + 2);
}
/*
* Virtual device management.
*/
static vdev_ops_t *vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_draid_ops,
&vdev_draid_spare_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
&vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, **opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
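/*
 * Usage example (for exposition only, not compiled): look up the ops
 * vector by the type string stored in the pool config.
 */
#if 0
vdev_ops_t *ops = vdev_getops(VDEV_TYPE_MIRROR); /* "mirror" */
ASSERT3P(ops, ==, &vdev_mirror_ops);
#endif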
/*
* Given a vdev and a metaslab class, find which metaslab group we're
* interested in. All vdevs may belong to two different metaslab classes.
* Dedicated slog devices use only the primary metaslab group, rather than a
* separate log group. For embedded slogs, the vdev_log_mg will be non-NULL.
*/
metaslab_group_t *
vdev_get_mg(vdev_t *vd, metaslab_class_t *mc)
{
if (mc == spa_embedded_log_class(vd->vdev_spa) &&
vd->vdev_log_mg != NULL)
return (vd->vdev_log_mg);
else
return (vd->vdev_mg);
}
-/* ARGSUSED */
void
vdev_default_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
+ (void) vd, (void) remain_rs;
+
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
}
/*
* Derive the enumerated allocation bias from string input.
* String origin is either the per-vdev zap or zpool(8).
*/
static vdev_alloc_bias_t
vdev_derive_alloc_bias(const char *bias)
{
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
if (strcmp(bias, VDEV_ALLOC_BIAS_LOG) == 0)
alloc_bias = VDEV_BIAS_LOG;
else if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
alloc_bias = VDEV_BIAS_SPECIAL;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
alloc_bias = VDEV_BIAS_DEDUP;
return (alloc_bias);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
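/*
 * Worked example (illustrative): with a top-level ashift of 12 (4K), a
 * 5000-byte psize rounds up to P2ROUNDUP(5000, 4096) = 8192; for a
 * mirror, the result is then the maximum of that rounded value and each
 * child's asize for the same psize.
 */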
uint64_t
vdev_default_min_asize(vdev_t *vd)
{
return (vd->vdev_min_asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded to the nearest metaslab. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* to the nearest metaslab.
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
return (pvd->vdev_ops->vdev_op_min_asize(pvd));
}
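/*
 * Worked example (illustrative): a top-level vdev with asize 100G and
 * ms_shift 34 (16G metaslabs) reports P2ALIGN(100G, 16G) = 96G, so a
 * replacement device only has to cover the same six metaslabs rather
 * than the full 100G.
 */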
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
/*
* Get the minimal allocation size for the top-level vdev.
*/
uint64_t
vdev_get_min_alloc(vdev_t *vd)
{
uint64_t min_alloc = 1ULL << vd->vdev_ashift;
if (vd->vdev_ops->vdev_op_min_alloc != NULL)
min_alloc = vd->vdev_ops->vdev_op_min_alloc(vd);
return (min_alloc);
}
/*
* Get the parity level for a top-level vdev.
*/
uint64_t
vdev_get_nparity(vdev_t *vd)
{
uint64_t nparity = 0;
if (vd->vdev_ops->vdev_op_nparity != NULL)
nparity = vd->vdev_ops->vdev_op_nparity(vd);
return (nparity);
}
/*
* Get the number of data disks for a top-level vdev.
*/
uint64_t
vdev_get_ndisks(vdev_t *vd)
{
uint64_t ndisks = 1;
if (vd->vdev_ops->vdev_op_ndisks != NULL)
ndisks = vd->vdev_ops->vdev_op_ndisks(vd);
return (ndisks);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
bcopy(pvd->vdev_child, newchild, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
if (cvd->vdev_ops->vdev_op_leaf) {
list_insert_head(&cvd->vdev_spa->spa_leaf_list, cvd);
cvd->vdev_spa->spa_leaf_list_gen++;
}
}
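/*
 * Note on the invariant maintained above: after vdev_add_child()
 * returns, every ancestor's vdev_guid_sum equals the sum of the guids
 * of all vdevs in its subtree (including its own), which is what the
 * import-time guid-sum check against the uberblock relies on.
 */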
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
if (cvd->vdev_ops->vdev_op_leaf) {
spa_t *spa = cvd->vdev_spa;
list_remove(&spa->spa_leaf_list, cvd);
spa->spa_leaf_list_gen++;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (oldc == 0)
return;
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
if (newc > 0) {
newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
for (int c = newc = 0; c < oldc; c++) {
if ((cvd = pvd->vdev_child[c]) != NULL) {
newchild[newc] = cvd;
cvd->vdev_id = newc++;
}
}
} else {
newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
vic->vic_prev_indirect_vdev = UINT64_MAX;
rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
vd->vdev_obsolete_segments = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_deadman_rl, &zfs_slow_io_events_per_second,
1);
zfs_ratelimit_init(&vd->vdev_checksum_rl,
&zfs_checksum_events_per_second, 1);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&vd->vdev_rebuild_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_rebuild_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
vdev_cache_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
char *type;
uint64_t guid = 0, islog;
vdev_t *vd;
vdev_indirect_config_t *vic;
char *tmp = NULL;
int rc;
vdev_alloc_bias_t alloc_bias = VDEV_BIAS_NONE;
boolean_t top_level = (parent && !parent->vdev_parent);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
if (top_level && alloctype == VDEV_ALLOC_ADD) {
char *bias;
/*
* If creating a top-level vdev, check for allocation
* classes input.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias) == 0) {
alloc_bias = vdev_derive_alloc_bias(bias);
/* spa_vdev_add() expects feature to be enabled */
if (spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa,
SPA_FEATURE_ALLOCATION_CLASSES)) {
return (SET_ERROR(ENOTSUP));
}
}
/* spa_vdev_add() expects feature to be enabled */
if (ops == &vdev_draid_ops &&
spa->spa_load_state != SPA_LOAD_CREATE &&
!spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
return (SET_ERROR(ENOTSUP));
}
}
/*
* Initialize the vdev specific data. This is done before calling
* vdev_alloc_common() since it may fail and this simplifies the
* error reporting and cleanup code paths.
*/
void *tsd = NULL;
if (ops->vdev_op_init != NULL) {
rc = ops->vdev_op_init(spa, nv, &tsd);
if (rc != 0) {
return (rc);
}
}
vd = vdev_alloc_common(spa, id, guid, ops);
vd->vdev_tsd = tsd;
vd->vdev_islog = islog;
if (top_level && alloc_bias != VDEV_BIAS_NONE)
vd->vdev_alloc_bias = alloc_bias;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
vd->vdev_path = spa_strdup(vd->vdev_path);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
vd->vdev_devid = spa_strdup(vd->vdev_devid);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
&vd->vdev_physpath) == 0)
vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&vd->vdev_enc_sysfs_path) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
vd->vdev_fru = spa_strdup(vd->vdev_fru);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
vic = &vd->vdev_indirect_config;
ASSERT0(vic->vic_mapping_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
&vic->vic_mapping_object);
ASSERT0(vic->vic_births_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
&vic->vic_births_object);
ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
&vic->vic_prev_indirect_vdev);
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (top_level &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (top_level && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
/* Note: metaslab_group_create() is now deferred */
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
&vd->vdev_rebuild_txg);
if (nvlist_exists(nv, ZPOOL_CONFIG_RESILVER_DEFER))
vdev_defer_resilver(vd);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
else
vd->vdev_faulted = 0ULL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
if (vd->vdev_ops->vdev_op_fini != NULL)
vd->vdev_ops->vdev_op_fini(vd);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
ASSERT(!list_link_active(&vd->vdev_leaf_node));
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
vdev_cache_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
EQUIV(vd->vdev_indirect_births != NULL,
vd->vdev_indirect_mapping != NULL);
if (vd->vdev_indirect_births != NULL) {
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vdev_indirect_births_close(vd->vdev_indirect_births);
}
if (vd->vdev_obsolete_sm != NULL) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
}
range_tree_destroy(vd->vdev_obsolete_segments);
rw_destroy(&vd->vdev_indirect_rwlock);
mutex_destroy(&vd->vdev_obsolete_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
mutex_destroy(&vd->vdev_initialize_lock);
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
mutex_destroy(&vd->vdev_trim_lock);
mutex_destroy(&vd->vdev_autotrim_lock);
mutex_destroy(&vd->vdev_trim_io_lock);
cv_destroy(&vd->vdev_trim_cv);
cv_destroy(&vd->vdev_autotrim_cv);
cv_destroy(&vd->vdev_trim_io_cv);
mutex_destroy(&vd->vdev_rebuild_lock);
cv_destroy(&vd->vdev_rebuild_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_deadman_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
if (tvd->vdev_log_mg)
ASSERT3P(tvd->vdev_log_mg, ==, svd->vdev_log_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_log_mg = svd->vdev_log_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_log_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
if (tvd->vdev_log_mg != NULL)
tvd->vdev_log_mg->mg_vd = tvd;
tvd->vdev_checkpoint_sm = svd->vdev_checkpoint_sm;
svd->vdev_checkpoint_sm = NULL;
tvd->vdev_alloc_bias = svd->vdev_alloc_bias;
svd->vdev_alloc_bias = VDEV_BIAS_NONE;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
/*
* State which may be set on a top-level vdev that's in the
* process of being removed.
*/
ASSERT0(tvd->vdev_indirect_config.vic_births_object);
ASSERT0(tvd->vdev_indirect_config.vic_mapping_object);
ASSERT3U(tvd->vdev_indirect_config.vic_prev_indirect_vdev, ==, -1ULL);
ASSERT3P(tvd->vdev_indirect_mapping, ==, NULL);
ASSERT3P(tvd->vdev_indirect_births, ==, NULL);
ASSERT3P(tvd->vdev_obsolete_sm, ==, NULL);
ASSERT0(tvd->vdev_removing);
ASSERT0(tvd->vdev_rebuilding);
tvd->vdev_removing = svd->vdev_removing;
tvd->vdev_rebuilding = svd->vdev_rebuilding;
tvd->vdev_rebuild_config = svd->vdev_rebuild_config;
tvd->vdev_indirect_config = svd->vdev_indirect_config;
tvd->vdev_indirect_mapping = svd->vdev_indirect_mapping;
tvd->vdev_indirect_births = svd->vdev_indirect_births;
range_tree_swap(&svd->vdev_obsolete_segments,
&tvd->vdev_obsolete_segments);
tvd->vdev_obsolete_sm = svd->vdev_obsolete_sm;
svd->vdev_indirect_config.vic_mapping_object = 0;
svd->vdev_indirect_config.vic_births_object = 0;
svd->vdev_indirect_config.vic_prev_indirect_vdev = -1ULL;
svd->vdev_indirect_mapping = NULL;
svd->vdev_indirect_births = NULL;
svd->vdev_obsolete_sm = NULL;
svd->vdev_removing = 0;
svd->vdev_rebuilding = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev. There is no need to
* call .vdev_op_init() since mirror/replacing vdevs do not have private state.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_logical_ashift = cvd->vdev_logical_ashift;
mvd->vdev_physical_ashift = cvd->vdev_physical_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
cvd->vdev_logical_ashift = mvd->vdev_logical_ashift;
cvd->vdev_physical_ashift = mvd->vdev_physical_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
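/*
 * For example, if mvd's guid is G_m and cvd's guid is G_c, then
 * guid_delta is G_m - G_c and the two additions above leave cvd with
 * vdev_guid == G_m while shifting vdev_guid_sum by the same amount,
 * so when cvd is re-added to pvd below the correct sum propagates up
 * the tree without a second walk.
 */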
/*
 * If the pool is not set to autoexpand, we also need to preserve
 * mvd's asize to prevent automatic expansion of cvd.
 * Otherwise, if we are adjusting the mirror by attaching and
 * detaching children of non-uniform sizes, the mirror could
 * autoexpand, unexpectedly requiring larger devices to
 * re-establish the mirror.
 */
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
void
vdev_metaslab_group_create(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
 * metaslab_group_create() was delayed until the allocation bias was
 * available.
 */
if (vd->vdev_mg == NULL) {
metaslab_class_t *mc;
if (vd->vdev_islog && vd->vdev_alloc_bias == VDEV_BIAS_NONE)
vd->vdev_alloc_bias = VDEV_BIAS_LOG;
ASSERT3U(vd->vdev_islog, ==,
(vd->vdev_alloc_bias == VDEV_BIAS_LOG));
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
mc = spa_log_class(spa);
break;
case VDEV_BIAS_SPECIAL:
mc = spa_special_class(spa);
break;
case VDEV_BIAS_DEDUP:
mc = spa_dedup_class(spa);
break;
default:
mc = spa_normal_class(spa);
}
vd->vdev_mg = metaslab_group_create(mc, vd,
spa->spa_alloc_count);
if (!vd->vdev_islog) {
vd->vdev_log_mg = metaslab_group_create(
spa_embedded_log_class(spa), vd, 1);
}
/*
 * The spa ashift min/max only apply to the normal metaslab
 * class. The class destination is late-binding, so the ashift
 * boundaries could not be set until now.
 */
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
}
}
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
boolean_t expanding = (oldc != 0);
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (expanding) {
bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (uint64_t m = oldc; m < newc; m++) {
uint64_t object = 0;
/*
* vdev_ms_array may be 0 if we are creating the "fake"
* metaslabs for an indirect vdev for zdb's leak detection.
* See zdb_leak_init().
*/
if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(spa->spa_meta_objset,
vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
/*
 * Find the emptiest metaslab on the vdev and mark it for use as the
 * embedded slog by moving it from the regular to the log metaslab
 * group.
 */
if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
vd->vdev_ms_count > zfs_embedded_slog_min_ms &&
avl_is_empty(&vd->vdev_log_mg->mg_metaslab_tree)) {
uint64_t slog_msid = 0;
uint64_t smallest = UINT64_MAX;
/*
* Note, we only search the new metaslabs, because the old
* (pre-existing) ones may be active (e.g. have non-empty
* range_tree's), and we don't move them to the new
* metaslab_t.
*/
for (uint64_t m = oldc; m < newc; m++) {
uint64_t alloc =
space_map_allocated(vd->vdev_ms[m]->ms_sm);
if (alloc < smallest) {
slog_msid = m;
smallest = alloc;
}
}
metaslab_t *slog_ms = vd->vdev_ms[slog_msid];
/*
* The metaslab was marked as dirty at the end of
* metaslab_init(). Remove it from the dirty list so that we
* can uninitialize and reinitialize it to the new class.
*/
if (txg != 0) {
(void) txg_list_remove_this(&vd->vdev_ms_list,
slog_ms, txg);
}
uint64_t sm_obj = space_map_object(slog_ms->ms_sm);
metaslab_fini(slog_ms);
VERIFY0(metaslab_init(vd->vdev_log_mg, slog_msid, sm_obj, txg,
&vd->vdev_ms[slog_msid]));
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is being removed we don't activate
* the metaslabs since we want to ensure that no new
* allocations are performed on this device.
*/
if (!expanding && !vd->vdev_removing) {
metaslab_group_activate(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_group_activate(vd->vdev_log_mg);
}
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
/*
 * Regardless of whether this vdev was just added or is being
 * expanded, the metaslab count has changed. Recalculate the
 * block limit.
 */
spa_log_sm_set_blocklimit(spa);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
if (vd->vdev_checkpoint_sm != NULL) {
ASSERT(spa_feature_is_active(vd->vdev_spa,
SPA_FEATURE_POOL_CHECKPOINT));
space_map_close(vd->vdev_checkpoint_sm);
/*
 * Even though we close the space map, we need to set its
 * pointer to NULL. The reason is that vdev_metaslab_fini()
 * may be called multiple times for certain operations
 * (e.g. when destroying a pool), so we need to ensure that
 * this clause never executes twice. This logic is similar
 * to the one used for the vdev_ms clause below.
 */
vd->vdev_checkpoint_sm = NULL;
}
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
if (vd->vdev_log_mg != NULL) {
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
}
uint64_t count = vd->vdev_ms_count;
for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
vd->vdev_ms_count = 0;
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
ASSERT0(mg->mg_histogram[i]);
if (vd->vdev_log_mg != NULL)
ASSERT0(vd->vdev_log_mg->mg_histogram[i]);
}
}
ASSERT0(vd->vdev_ms_count);
ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
vdev_dbgmsg(vd, "failed probe");
(void) zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
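/*
 * Consequence of the above: if many zios hit errors on this vdev at
 * once, only the first caller allocates vps and creates the probe
 * zio; the rest simply attach as parents via zio_add_child() below
 * and share the single probe's outcome.
 */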
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_be)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
static void
vdev_load_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_load_error = vdev_load(vd);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Returns B_TRUE if the passed child should be opened.
*/
static boolean_t
vdev_default_open_children_func(vdev_t *vd)
{
+ (void) vd;
return (B_TRUE);
}
/*
* Open the requested child vdevs. If any of the leaf vdevs are using
* a ZFS volume then do the opens in a single thread. This avoids a
* deadlock when the current thread is holding the spa_namespace_lock.
*/
static void
vdev_open_children_impl(vdev_t *vd, vdev_open_children_func_t *open_func)
{
int children = vd->vdev_children;
taskq_t *tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (open_func(cvd) == B_FALSE)
continue;
if (tq == NULL || vdev_uses_zvols(vd)) {
cvd->vdev_open_error = vdev_open(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_open_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
vd->vdev_nonrot &= cvd->vdev_nonrot;
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
}
/*
* Open all child vdevs.
*/
void
vdev_open_children(vdev_t *vd)
{
vdev_open_children_impl(vd, vdev_default_open_children_func);
}
/*
* Conditionally open a subset of child vdevs.
*/
void
vdev_open_children_subset(vdev_t *vd, vdev_open_children_func_t *open_func)
{
vdev_open_children_impl(vd, open_func);
}
/*
 * Compute the raidz-deflation ratio. Note that we hard-code
 * 128k (1 << 17) because it is the "typical" blocksize.
 * Even though SPA_MAXBLOCKSIZE has changed, this algorithm must not
 * change; otherwise it would inconsistently account for existing bp's.
 */
static void
vdev_set_deflate_ratio(vdev_t *vd)
{
if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
vd->vdev_deflate_ratio = (1 << 17) /
(vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
}
}
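/*
 * Worked example (illustrative): for a plain disk where a 128k block
 * allocates exactly 128k, the ratio is (1 << 17) / (131072 >> 9) = 512.
 * For a raidz vdev where the same 128k logical block consumes, say,
 * 160k of asize including parity, the ratio drops to
 * (1 << 17) / (163840 >> 9) = 409, deflating the reported space
 * accordingly.
 */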
/*
 * Maximize performance by inflating the configured ashift for top-level
 * vdevs to be as close to the physical ashift as possible while
 * respecting the administrator-defined limits and never letting it go
 * below the logical ashift.
 */
static void
vdev_ashift_optimize(vdev_t *vd)
{
ASSERT(vd == vd->vdev_top);
if (vd->vdev_ashift < vd->vdev_physical_ashift) {
vd->vdev_ashift = MIN(
MAX(zfs_vdev_max_auto_ashift, vd->vdev_ashift),
MAX(zfs_vdev_min_auto_ashift,
vd->vdev_physical_ashift));
} else {
/*
* If the logical and physical ashifts are the same, then
* we ensure that the top-level vdev's ashift is not smaller
* than our minimum ashift value. For the unusual case
* where logical ashift > physical ashift, we can't cap
* the calculated ashift based on max ashift as that
* would cause failures.
* We still check if we need to increase it to match
* the min ashift.
*/
vd->vdev_ashift = MAX(zfs_vdev_min_auto_ashift,
vd->vdev_ashift);
}
}
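/*
 * Illustrative example (tunable values are hypothetical, not
 * necessarily the defaults): with zfs_vdev_min_auto_ashift = 9,
 * zfs_vdev_max_auto_ashift = 14, a configured ashift of 9 and a
 * physical_ashift of 12, the first branch computes
 * MIN(MAX(14, 9), MAX(9, 12)) = 12, i.e. the ashift is raised to the
 * physical sector size without exceeding the administrator's maximum.
 */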
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t logical_ashift = 0;
uint64_t physical_ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize,
&logical_ashift, &physical_ashift);
/*
 * The physical volume size should never be larger than its max size,
 * unless the disk has shrunk while we were reading it or the device is
 * buggy or damaged. Either way it's not safe for use; bail out of the
 * open.
 */
if (osize > max_osize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_OPEN_FAILED);
return (SET_ERROR(ENXIO));
}
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, SET_ERROR(ENXIO));
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
if (vd->vdev_stat.vs_aux == VDEV_AUX_CHILDREN_OFFLINE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE,
vd->vdev_stat.vs_aux);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
}
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3}, during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
/*
* We can always set the logical/physical ashift members since
* their values are only used to calculate the vdev_ashift when
* the device is first added to the config. These values should
* not be used for anything else since they may change whenever
* the device is reopened and we don't store them in the label.
*/
vd->vdev_physical_ashift =
MAX(physical_ashift, vd->vdev_physical_ashift);
vd->vdev_logical_ashift = MAX(logical_ashift,
vd->vdev_logical_ashift);
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
/*
 * If the vdev_ashift was not overridden at creation time,
 * then set it to the logical ashift and optimize it.
 */
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = vd->vdev_logical_ashift;
if (vd->vdev_logical_ashift > ASHIFT_MAX) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_ASHIFT_TOO_BIG);
return (SET_ERROR(EDOM));
}
if (vd->vdev_top == vd) {
vdev_ashift_optimize(vd);
}
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
 * Make sure the required alignment hasn't increased.
 */
if (vd->vdev_ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
(void) zfs_ereport_post(
FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EDOM));
}
vd->vdev_max_asize = max_asize;
}
/*
 * If all children are healthy we update asize if either:
 * - The asize has increased, due to a device expansion caused by
 *   dynamic LUN growth or vdev replacement, and automatic expansion
 *   is enabled; making the additional space available.
 * - The asize has decreased, due to a device shrink usually caused by
 *   a vdev replace with a smaller device. This ensures that
 *   calculations based on max_asize and asize (e.g. esize) are always
 *   valid. It's safe to do this as we've already validated that asize
 *   is greater than vdev_min_asize.
 */
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
/*
* Track the minimum allocation size.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
vd->vdev_islog == 0 && vd->vdev_aux == NULL) {
uint64_t min_alloc = vdev_get_min_alloc(vd);
if (min_alloc < spa->spa_min_alloc)
spa->spa_min_alloc = min_alloc;
}
/*
* If this is a leaf vdev, assess whether a resilver is needed.
* But don't do this if we are doing a reopen for a scrub, since
* this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
return (0);
}
static void
vdev_validate_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_validate_thread = curthread;
vd->vdev_validate_error = vdev_validate(vd);
vd->vdev_validate_thread = NULL;
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
taskq_t *tq = NULL;
nvlist_t *label;
uint64_t guid = 0, aux_guid = 0, top_guid;
uint64_t state;
nvlist_t *nvl;
uint64_t txg;
int children = vd->vdev_children;
if (vdev_validate_skip)
return (0);
if (children > 0) {
tq = taskq_create("vdev_validate", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
for (uint64_t c = 0; c < children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
vdev_validate_child(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_validate_child, cvd,
TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < children; c++) {
int error = vd->vdev_child[c]->vdev_validate_error;
if (error != 0)
return (SET_ERROR(EBADF));
}
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (!vd->vdev_ops->vdev_op_leaf || !vdev_readable(vd))
return (0);
/*
 * If we are performing an extreme rewind, we allow for a label that
 * was modified at a point after the current txg.
 * If the config lock is not held, do not check the txg: spa_sync could
 * be updating the vdev's label before updating spa_last_synced_txg.
 */
if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
txg = UINT64_MAX;
else
txg = spa_last_synced_txg(spa);
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config for "
"txg %llu", (u_longlong_t)txg);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other pool");
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_GUID);
return (0);
}
/*
* If config is not trusted then ignore the spa guid check. This is
* necessary because if the machine crashed during a re-guid the new
* guid might have been written to all of the vdev labels, but not the
* cached config. The check will be performed again once we have the
* trusted config from the MOS.
*/
if (spa->spa_trust_config && guid != spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid doesn't "
"match config (%llu != %llu)", (u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_GUID);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID, &top_guid)
!= 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_TOP_GUID);
return (0);
}
/*
* If this vdev just became a top-level vdev because its sibling was
* detached, it will have adopted the parent's vdev guid -- but the
* label may or may not be on disk yet. Fortunately, either version
* of the label will have the same top guid, so if we're a top-level
* vdev, we can safely compare to that instead.
* However, if the config comes from a cachefile that failed to update
* after the detach, a top-level vdev will appear as a non top-level
* vdev in the config. Also relax the constraints if we perform an
* extreme rewind.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (vd->vdev_guid != guid && vd->vdev_guid != aux_guid) {
boolean_t mismatch = B_FALSE;
if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
if (vd != vd->vdev_top || vd->vdev_guid != top_guid)
mismatch = B_TRUE;
} else {
if (vd->vdev_guid != top_guid &&
vd->vdev_top->vdev_guid != guid)
mismatch = B_TRUE;
}
if (mismatch) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid "
"doesn't match label guid");
vdev_dbgmsg(vd, "CONFIG: guid %llu, top_guid %llu",
(u_longlong_t)vd->vdev_guid,
(u_longlong_t)vd->vdev_top->vdev_guid);
vdev_dbgmsg(vd, "LABEL: guid %llu, top_guid %llu, "
"aux_guid %llu", (u_longlong_t)guid,
(u_longlong_t)top_guid, (u_longlong_t)aux_guid);
return (0);
}
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing from label",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state (%llu) "
"for spa %s", (u_longlong_t)state, spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
return (0);
}
static void
vdev_copy_path_impl(vdev_t *svd, vdev_t *dvd)
{
char *old, *new;
if (svd->vdev_path != NULL && dvd->vdev_path != NULL) {
if (strcmp(svd->vdev_path, dvd->vdev_path) != 0) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: path changed "
"from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
dvd->vdev_path, svd->vdev_path);
spa_strfree(dvd->vdev_path);
dvd->vdev_path = spa_strdup(svd->vdev_path);
}
} else if (svd->vdev_path != NULL) {
dvd->vdev_path = spa_strdup(svd->vdev_path);
zfs_dbgmsg("vdev_copy_path: vdev %llu: path set to '%s'",
(u_longlong_t)dvd->vdev_guid, dvd->vdev_path);
}
/*
* Our enclosure sysfs path may have changed between imports
*/
old = dvd->vdev_enc_sysfs_path;
new = svd->vdev_enc_sysfs_path;
if ((old != NULL && new == NULL) ||
(old == NULL && new != NULL) ||
((old != NULL && new != NULL) && strcmp(new, old) != 0)) {
zfs_dbgmsg("vdev_copy_path: vdev %llu: vdev_enc_sysfs_path "
"changed from '%s' to '%s'", (u_longlong_t)dvd->vdev_guid,
old, new);
if (dvd->vdev_enc_sysfs_path)
spa_strfree(dvd->vdev_enc_sysfs_path);
if (svd->vdev_enc_sysfs_path) {
dvd->vdev_enc_sysfs_path = spa_strdup(
svd->vdev_enc_sysfs_path);
} else {
dvd->vdev_enc_sysfs_path = NULL;
}
}
}
/*
 * Recursively copy vdev paths from one vdev to another. The source and
 * destination vdev trees must have the same geometry, otherwise an
 * error is returned. Intended to copy paths from the userland config
 * into the MOS config.
 */
int
vdev_copy_path_strict(vdev_t *svd, vdev_t *dvd)
{
if ((svd->vdev_ops == &vdev_missing_ops) ||
(svd->vdev_ishole && dvd->vdev_ishole) ||
(dvd->vdev_ops == &vdev_indirect_ops))
return (0);
if (svd->vdev_ops != dvd->vdev_ops) {
vdev_dbgmsg(svd, "vdev_copy_path: vdev type mismatch: %s != %s",
svd->vdev_ops->vdev_op_type, dvd->vdev_ops->vdev_op_type);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_guid != dvd->vdev_guid) {
vdev_dbgmsg(svd, "vdev_copy_path: guids mismatch (%llu != "
"%llu)", (u_longlong_t)svd->vdev_guid,
(u_longlong_t)dvd->vdev_guid);
return (SET_ERROR(EINVAL));
}
if (svd->vdev_children != dvd->vdev_children) {
vdev_dbgmsg(svd, "vdev_copy_path: children count mismatch: "
"%llu != %llu", (u_longlong_t)svd->vdev_children,
(u_longlong_t)dvd->vdev_children);
return (SET_ERROR(EINVAL));
}
for (uint64_t i = 0; i < svd->vdev_children; i++) {
int error = vdev_copy_path_strict(svd->vdev_child[i],
dvd->vdev_child[i]);
if (error != 0)
return (error);
}
if (svd->vdev_ops->vdev_op_leaf)
vdev_copy_path_impl(svd, dvd);
return (0);
}
static void
vdev_copy_path_search(vdev_t *stvd, vdev_t *dvd)
{
ASSERT(stvd->vdev_top == stvd);
ASSERT3U(stvd->vdev_id, ==, dvd->vdev_top->vdev_id);
for (uint64_t i = 0; i < dvd->vdev_children; i++) {
vdev_copy_path_search(stvd, dvd->vdev_child[i]);
}
if (!dvd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(dvd))
return;
/*
 * The idea here is that while a vdev can shift positions within
 * a top vdev (when replacing, attaching a mirror, etc.), it cannot
 * step outside of it.
 */
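/*
 * For example, a disk that moves from being a plain child of a mirror
 * to being a child of a replacing vdev keeps its guid, so the guid
 * lookup below still finds it under the same top-level vdev even
 * though its position (vdev_id/parent) has changed.
 */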
vdev_t *vd = vdev_lookup_by_guid(stvd, dvd->vdev_guid);
if (vd == NULL || vd->vdev_ops != dvd->vdev_ops)
return;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_copy_path_impl(vd, dvd);
}
/*
 * Recursively copy vdev paths from one root vdev to another. The source
 * and destination vdev trees may differ in geometry. For each
 * destination leaf vdev, search for a vdev with the same guid and top
 * vdev id in the source. Intended to copy paths from the userland
 * config into the MOS config.
 */
void
vdev_copy_path_relaxed(vdev_t *srvd, vdev_t *drvd)
{
uint64_t children = MIN(srvd->vdev_children, drvd->vdev_children);
ASSERT(srvd->vdev_ops == &vdev_root_ops);
ASSERT(drvd->vdev_ops == &vdev_root_ops);
for (uint64_t i = 0; i < children; i++) {
vdev_copy_path_search(srvd->vdev_child[i],
drvd->vdev_child[i]);
}
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
spa_t *spa __maybe_unused = vd->vdev_spa;
ASSERT(vd != NULL);
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
vdev_cache_purge(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_hold != NULL)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_ops->vdev_op_rele != NULL)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache) {
/*
* In case the vdev is present we should evict all ARC
* buffers and pointers to log blocks and reclaim their
* space before restoring its contents to L2ARC.
*/
if (l2arc_vdev_present(vd)) {
l2arc_rebuild_vdev(vd, B_TRUE);
} else {
l2arc_add_vdev(spa, vd);
}
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
} else {
(void) vdev_validate(vd);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : SET_ERROR(ENXIO));
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
uint64_t asize = vd->vdev_asize;
uint64_t ms_count = asize >> zfs_vdev_default_ms_shift;
uint64_t ms_shift;
/*
* There are two dimensions to the metaslab sizing calculation:
* the size of the metaslab and the count of metaslabs per vdev.
*
* The default values used below are a good balance between memory
* usage (larger metaslab size means more memory needed for loaded
* metaslabs; more metaslabs means more memory needed for the
* metaslab_t structs), metaslab load time (larger metaslabs take
* longer to load), and metaslab sync time (more metaslabs means
* more time spent syncing all of them).
*
* In general, we aim for zfs_vdev_default_ms_count (200) metaslabs.
* The range of the dimensions are as follows:
*
* 2^29 <= ms_size <= 2^34
* 16 <= ms_count <= 131,072
*
* On the lower end of vdev sizes, we aim for metaslab sizes of
* at least 512MB (2^29) to minimize fragmentation effects when
* testing with smaller devices. However, the count constraint
* of at least 16 metaslabs will override this minimum size goal.
*
* On the upper end of vdev sizes, we aim for a maximum metaslab
* size of 16GB. However, we will cap the total count to 2^17
* metaslabs to keep our memory footprint in check and let the
* metaslab size grow from there if that limit is hit.
*
* The net effect of applying the above constraints is summarized below.
*
* vdev size metaslab count
* --------------|-----------------
* < 8GB ~16
* 8GB - 100GB one per 512MB
* 100GB - 3TB ~200
* 3TB - 2PB one per 16GB
* > 2PB ~131,072
* --------------------------------
*
* Finally, note that all of the above calculate the initial
* number of metaslabs. Expanding a top-level vdev will result
* in additional metaslabs being allocated making it possible
* to exceed the zfs_vdev_ms_count_limit.
*/
if (ms_count < zfs_vdev_min_ms_count)
ms_shift = highbit64(asize / zfs_vdev_min_ms_count);
else if (ms_count > zfs_vdev_default_ms_count)
ms_shift = highbit64(asize / zfs_vdev_default_ms_count);
else
ms_shift = zfs_vdev_default_ms_shift;
if (ms_shift < SPA_MAXBLOCKSHIFT) {
ms_shift = SPA_MAXBLOCKSHIFT;
} else if (ms_shift > zfs_vdev_max_ms_shift) {
ms_shift = zfs_vdev_max_ms_shift;
/* cap the total count to constrain memory footprint */
if ((asize >> ms_shift) > zfs_vdev_ms_count_limit)
ms_shift = highbit64(asize / zfs_vdev_ms_count_limit);
}
vd->vdev_ms_shift = ms_shift;
ASSERT3U(vd->vdev_ms_shift, >=, SPA_MAXBLOCKSHIFT);
}
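/*
 * A minimal standalone sketch of the clamping above. The helper name is
 * hypothetical; it leans on highbit64() from the code above and hard-codes
 * the defaults described in the comment (16 and 200 metaslab counts, the
 * 2^29 default and 2^34 maximum sizes, the 2^17 count limit, and a
 * SPA_MAXBLOCKSHIFT of 24).
 */
static uint64_t
example_ms_shift(uint64_t asize)
{
	uint64_t count = asize >> 29;	/* metaslab count at the 512 MiB default */
	uint64_t shift;
	if (count < 16)			/* too few: shrink the metaslabs */
		shift = highbit64(asize / 16);
	else if (count > 200)		/* too many: grow the metaslabs */
		shift = highbit64(asize / 200);
	else
		shift = 29;
	if (shift < 24)			/* SPA_MAXBLOCKSHIFT floor */
		shift = 24;
	else if (shift > 34) {		/* 16 GiB ceiling ... */
		shift = 34;
		if ((asize >> shift) > (1ULL << 17))	/* ... unless the count explodes */
			shift = highbit64(asize / (1ULL << 17));
	}
	return (shift);
}
/* For a 1 TiB vdev this returns 33: 8 GiB metaslabs, 128 of them. */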
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
/* indirect vdevs don't have metaslabs or dtls */
ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in more than 'maxfaults' children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
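/*
 * Worked example of the "modified union" above, assuming a raidz2
 * top-level vdev (nparity = 2, so minref = 3 in vdev_dtl_reassess()):
 * a txg missing on one or two children can still be reconstructed and
 * stays out of the parent's DTL_MISSING, while a txg missing on three
 * or more children is added. DTL_PARTIAL uses minref = 1, so the same
 * txg appears in the parent's DTL_PARTIAL in every one of those cases.
 */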
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
/*
* While we are loading the pool, the DTLs have not been loaded yet.
* This isn't a problem but it can result in devices being tried
* which are known to not have the data, in which case the import
* relies on the checksum to ensure that we get the right data.
* Note that while importing we are only reading the MOS, which is
* always checksummed.
*/
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(rt))
dirty = range_tree_contains(rt, txg, size);
mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
mutex_enter(&vd->vdev_dtl_lock);
empty = range_tree_is_empty(rt);
mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
boolean_t
vdev_default_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
+ (void) dva, (void) psize;
+
/* Set by sequential resilver. */
if (phys_birth == TXG_UNKNOWN)
return (B_TRUE);
return (vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1));
}
/*
* Returns B_TRUE if the vdev determines the DVA needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, dva, psize,
phys_birth));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_min(vd->vdev_dtl[DTL_MISSING]) - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
return (range_tree_max(vd->vdev_dtl[DTL_MISSING]));
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd, boolean_t rebuild_done)
{
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_deferred)
return (B_FALSE);
if (range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]))
return (B_TRUE);
if (rebuild_done) {
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
/* Rebuild not initiated by attach */
if (vd->vdev_rebuild_txg == 0)
return (B_TRUE);
/*
* When a rebuild completes without error then all missing data
* up to the rebuild max txg has been reconstructed and the DTL
* is eligible for excision.
*/
if (vrp->vrp_rebuild_state == VDEV_REBUILD_COMPLETE &&
vdev_dtl_max(vd) <= vrp->vrp_max_txg) {
ASSERT3U(vrp->vrp_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(vrp->vrp_min_txg, <, vd->vdev_rebuild_txg);
ASSERT3U(vd->vdev_rebuild_txg, <=, vrp->vrp_max_txg);
return (B_TRUE);
}
} else {
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scnp __maybe_unused = &scn->scn_phys;
/* Resilver not initiated by attach */
if (vd->vdev_resilver_txg == 0)
return (B_TRUE);
/*
* When a resilver is initiated the scan will assign the
* scn_max_txg value to the highest txg value that exists
* in all DTLs. If this device's max DTL is not part of this
* scan (i.e. it is not in the range (scn_min_txg, scn_max_txg]),
* then it is not eligible for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scnp->scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scnp->scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scnp->scn_max_txg);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion. If txg == 0 no
* write operations will be issued to the pool.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
boolean_t scrub_done, boolean_t rebuild_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done, rebuild_done);
if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
vdev_rebuild_t *vr = &vd->vdev_top->vdev_rebuild_config;
boolean_t check_excise = B_FALSE;
boolean_t wasempty = B_TRUE;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan or rebuild completed cleanly.
*/
if (zfs_scan_ignore_errors) {
if (scn != NULL)
scn->scn_phys.scn_errors = 0;
if (vr != NULL)
vr->vr_rebuild_phys.vrp_errors = 0;
}
if (scrub_txg != 0 &&
!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
wasempty = B_FALSE;
zfs_dbgmsg("guid:%llu txg:%llu scrub:%llu started:%d "
"dtl:%llu/%llu errors:%llu",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)txg,
(u_longlong_t)scrub_txg, spa->spa_scrub_started,
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd),
(u_longlong_t)(scn ? scn->scn_phys.scn_errors : 0));
}
/*
* If we've completed a scrub/resilver or a rebuild cleanly
* then determine if this vdev should remove any DTLs. We
* only want to excise regions on vdevs that were available
* during the entire duration of this scan.
*/
if (rebuild_done &&
vr != NULL && vr->vr_rebuild_phys.vrp_errors == 0) {
check_excise = B_TRUE;
} else {
if (spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) {
check_excise = B_TRUE;
}
}
if (scrub_txg && check_excise &&
vdev_dtl_should_excise(vd, rebuild_done)) {
/*
* We completed a scrub, resilver or rebuild up to
* scrub_txg. If we did it without rebooting, then
* the scrub dtl will be valid, so excise the old
* region and fold in the scrub dtl. Otherwise,
* leave the dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
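*
* For example, with scrub_txg = 50: a txg 40 present only in
* DTL_MISSING nets 1 - 1 = 0 and is excised, a txg 40 present in
* both DTL_MISSING and DTL_SCRUB nets 1 - 1 + 2 = 2 and is kept,
* and a txg 60 (>= scrub_txg) keeps its refcnt of 1 and is kept.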
*/
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING])) {
zfs_dbgmsg("update DTL_MISSING:%llu/%llu",
(u_longlong_t)vdev_dtl_min(vd),
(u_longlong_t)vdev_dtl_max(vd));
} else if (!wasempty) {
zfs_dbgmsg("DTL_MISSING is now empty");
}
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering or rebuilding and no longer
* has any DTLs then reset the appropriate flag and dirty
* the top level so that we persist the change.
*/
if (txg != 0 &&
range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
range_tree_is_empty(vd->vdev_dtl[DTL_OUTAGE])) {
if (vd->vdev_rebuild_txg != 0) {
vd->vdev_rebuild_txg = 0;
vdev_config_dirty(vd->vdev_top);
} else if (vd->vdev_resilver_txg != 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vdev_get_nparity(vd) != 0)
minref = vdev_get_nparity(vd) + 1; /* RAID-Z, dRAID */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rt;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
ASSERT(vdev_is_concrete(vd));
error = space_map_open(&vd->vdev_dtl_sm, mos,
vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
rt = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
error = space_map_load(vd->vdev_dtl_sm, rt, SM_ALLOC);
if (error == 0) {
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add,
vd->vdev_dtl[DTL_MISSING]);
mutex_exit(&vd->vdev_dtl_lock);
}
range_tree_vacate(rt, NULL, NULL);
range_tree_destroy(rt);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
static void
vdev_zap_allocation_data(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *string;
ASSERT(alloc_bias != VDEV_BIAS_NONE);
string =
(alloc_bias == VDEV_BIAS_LOG) ? VDEV_ALLOC_BIAS_LOG :
(alloc_bias == VDEV_BIAS_SPECIAL) ? VDEV_ALLOC_BIAS_SPECIAL :
(alloc_bias == VDEV_BIAS_DEDUP) ? VDEV_ALLOC_BIAS_DEDUP : NULL;
ASSERT(string != NULL);
VERIFY0(zap_add(mos, vd->vdev_top_zap, VDEV_TOP_ZAP_ALLOCATION_BIAS,
1, strlen(string) + 1, string, tx));
if (alloc_bias == VDEV_BIAS_SPECIAL || alloc_bias == VDEV_BIAS_DEDUP) {
spa_activate_allocation_classes(spa, tx);
}
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE)
vdev_zap_allocation_data(vd, tx);
}
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
static void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, zfs_vdev_dtl_sm_blksz, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
rtsync = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, zfs_vdev_dtl_sm_blksz, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE, B_FALSE);
if (!required && zio_injection_enabled) {
required = !!zio_handle_device_injection(vd, NULL,
SET_ERROR(ECHILD));
}
return (required);
}
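/*
 * For instance (illustrative): asking to offline one side of a two-way
 * mirror while the surviving side still has txgs in its DTL_MISSING
 * leaves those txgs with no readable copy, so the reassessment above
 * finds a non-empty DTL_OUTAGE on the top-level vdev and the caller
 * (e.g. vdev_offline_locked() below) refuses the request with EBUSY.
 */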
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_is_empty(vd->vdev_dtl[DTL_MISSING]) &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
/*
* Gets the checkpoint space map object from the vdev's ZAP. On success sm_obj
* will contain either the checkpoint spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_checkpoint_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(spa_meta_objset(vd->vdev_spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
int
vdev_load(vdev_t *vd)
{
int children = vd->vdev_children;
int error = 0;
taskq_t *tq = NULL;
/*
* It's only worthwhile to use the taskq for the root vdev, because the
* slow part is metaslab_init, and that only happens for top-level
* vdevs.
*/
if (vd->vdev_ops == &vdev_root_ops && vd->vdev_children > 0) {
tq = taskq_create("vdev_load", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
}
/*
* Recursively load all children.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (tq == NULL || vdev_uses_zvols(cvd)) {
cvd->vdev_load_error = vdev_load(cvd);
} else {
VERIFY(taskq_dispatch(tq, vdev_load_child,
cvd, TQ_SLEEP) != TASKQID_INVALID);
}
}
if (tq != NULL) {
taskq_wait(tq);
taskq_destroy(tq);
}
for (int c = 0; c < vd->vdev_children; c++) {
int error = vd->vdev_child[c]->vdev_load_error;
if (error != 0)
return (error);
}
vdev_set_deflate_ratio(vd);
/*
* On the spa_load path, grab the allocation bias from our ZAP.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
spa_t *spa = vd->vdev_spa;
char bias_str[64];
error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_ALLOCATION_BIAS, 1, sizeof (bias_str),
bias_str);
if (error == 0) {
ASSERT(vd->vdev_alloc_bias == VDEV_BIAS_NONE);
vd->vdev_alloc_bias = vdev_derive_alloc_bias(bias_str);
} else if (error != ENOENT) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: zap_lookup(top_zap=%llu) "
"failed [error=%d]", vd->vdev_top_zap, error);
return (error);
}
}
/*
* Load any rebuild state from the top-level vdev zap.
*/
if (vd == vd->vdev_top && vd->vdev_top_zap != 0) {
error = vdev_rebuild_load(vd);
if (error && error != ENOTSUP) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_rebuild_load "
"failed [error=%d]", error);
return (error);
}
}
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
}
error = vdev_metaslab_init(vd, 0);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
}
uint64_t checkpoint_sm_obj;
error = vdev_checkpoint_sm_object(vd, &checkpoint_sm_obj);
if (error == 0 && checkpoint_sm_obj != 0) {
objset_t *mos = spa_meta_objset(vd->vdev_spa);
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_checkpoint_sm, ==, NULL);
error = space_map_open(&vd->vdev_checkpoint_sm,
mos, checkpoint_sm_obj, 0, vd->vdev_asize,
vd->vdev_ashift);
if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: space_map_open "
"failed for checkpoint spacemap (obj %llu) "
"[error=%d]",
(u_longlong_t)checkpoint_sm_obj, error);
return (error);
}
ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
/*
* Since the checkpoint_sm contains free entries
* exclusively we can use space_map_allocated() to
* indicate the cumulative checkpointed space that
* has been freed.
*/
vd->vdev_stat.vs_checkpoint_space =
-space_map_allocated(vd->vdev_checkpoint_sm);
vd->vdev_spa->spa_checkpoint_info.sci_dspace +=
vd->vdev_stat.vs_checkpoint_space;
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve "
"checkpoint space map object from vdev ZAP "
"[error=%d]", error);
return (error);
}
}
/*
* If this is a leaf vdev, load its DTL.
*/
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
uint64_t obsolete_sm_object;
error = vdev_obsolete_sm_object(vd, &obsolete_sm_object);
if (error == 0 && obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
ASSERT(vd->vdev_asize != 0);
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
} else if (error != 0) {
vdev_dbgmsg(vd, "vdev_load: failed to retrieve obsolete "
"space map object from vdev ZAP [error=%d]", error);
return (error);
}
return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
static void
vdev_destroy_ms_flush_data(vdev_t *vd, dmu_tx_t *tx)
{
objset_t *mos = spa_meta_objset(vd->vdev_spa);
if (vd->vdev_top_zap == 0)
return;
uint64_t object = 0;
int err = zap_lookup(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, &object);
if (err == ENOENT)
return;
VERIFY0(err);
VERIFY0(dmu_object_free(mos, object, tx));
VERIFY0(zap_remove(mos, vd->vdev_top_zap,
VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, tx));
}
/*
* Free the objects used to store this vdev's spacemaps, and the array
* that points to them.
*/
void
vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ms_array == 0)
return;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
size_t array_bytes = array_count * sizeof (uint64_t);
uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
array_bytes, smobj_array, 0));
for (uint64_t i = 0; i < array_count; i++) {
uint64_t smobj = smobj_array[i];
if (smobj == 0)
continue;
space_map_free_obj(mos, smobj, tx);
}
kmem_free(smobj_array, array_bytes);
VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
vdev_destroy_ms_flush_data(vd, tx);
vd->vdev_ms_array = 0;
}
static void
vdev_remove_empty_log(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
!= NULL)
metaslab_sync_done(msp, txg);
if (reassess) {
metaslab_sync_reassess(vd->vdev_mg);
if (vd->vdev_log_mg != NULL)
metaslab_sync_reassess(vd->vdev_log_mg);
}
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
ASSERT3U(txg, ==, spa->spa_syncing_txg);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
vdev_indirect_sync_obsolete(vd, tx);
/*
* If the vdev is indirect, it can't have dirty
* metaslabs or DTLs.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
dmu_tx_commit(tx);
return;
}
}
ASSERT(vdev_is_concrete(vd));
if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
!vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
}
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
/*
* If this is an empty log device being removed, destroy the
* metadata associated with it.
*/
if (vd->vdev_islog && vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
vdev_remove_empty_log(vd, txg);
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
dmu_tx_commit(tx);
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
tvd = vd->vdev_top;
/*
* If the user did a 'zpool offline -f' then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
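*
* For instance, 'zpool offline -f <pool> <device>' reaches this branch
* and the resulting fault survives an export/import cycle, whereas a
* forced fault without VDEV_AUX_EXTERNAL_PERSIST is dropped at the next
* import; either kind is removed by 'zpool clear'.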
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!((flags & ZFS_ONLINE_EXPAND) ||
spa->spa_autoexpand);
vd->vdev_expansion_time = gethrestime_sec();
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
/* Restart initializing if necessary */
mutex_enter(&vd->vdev_initialize_lock);
if (vdev_writeable(vd) &&
vd->vdev_initialize_thread == NULL &&
vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) {
(void) vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
/*
* Restart trimming if necessary. We do not restart trimming for cache
* devices here. This is triggered by l2arc_rebuild_vdev()
* asynchronously for the whole device or in l2arc_evict() as it evicts
* space for upcoming writes.
*/
mutex_enter(&vd->vdev_trim_lock);
if (vdev_writeable(vd) && !vd->vdev_isl2cache &&
vd->vdev_trim_thread == NULL &&
vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
(void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED))
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
error = spa_reset_logs(spa);
/*
* If the log device was successfully reset but has
* checkpointed data, do not offline it.
*/
if (error == 0 &&
tvd->vdev_checkpoint_sm != NULL) {
ASSERT3U(space_map_allocated(
tvd->vdev_checkpoint_sm), !=, 0);
error = ZFS_ERR_CHECKPOINT_EXISTS;
}
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL,
SET_ERROR(EBUSY)));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vd->vdev_stat.vs_slow_ios = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
* It makes no sense to "clear" an indirect vdev.
*/
if (!vdev_is_concrete(vd))
return;
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we want to post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
/* If a resilver isn't required, check if vdevs can be culled */
if (vd->vdev_aux == NULL && !vdev_is_dead(vd) &&
!dsl_scan_resilvering(spa->spa_dsl_pool) &&
!dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
/* Clear recent error events cache (i.e. duplicate events tracking) */
zfs_ereport_clear(spa, vd);
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
return (vd->vdev_state < VDEV_STATE_DEGRADED ||
vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
!vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
/*
* Exclude the dRAID spare when aggregating to avoid double counting
* the ops and bytes. These IOs are counted by the physical leaves.
*/
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return;
for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
+ (void) cvd;
+
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
boolean_t
vdev_is_spacemap_addressable(vdev_t *vd)
{
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
return (B_TRUE);
/*
* If double-word space map entries are not enabled we assume
* 47 bits of the space map entry are dedicated to the entry's
* offset (see SM_OFFSET_BITS in space_map.h). We then use that
* to calculate the maximum address that can be described by a
* space map entry for the given device.
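*
* For example, with 512-byte sectors (ashift = 9) the shift below is
* 9 + 47 = 56, so any vdev smaller than 2^56 bytes (64 PiB) remains
* addressable by single-word entries.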
*/
uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
if (shift >= 63) /* detect potential overflow */
return (B_TRUE);
return (vd->vdev_asize < (1ULL << shift));
}
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
vsx->vsx_active_queue[t] =
vd->vdev_queue.vq_class[t].vqc_active;
vsx->vsx_pend_queue[t] = avl_numnodes(
&vd->vdev_queue.vq_class[t].vqc_queued_tree);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
bcopy(&vd->vdev_stat, vs, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf) {
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
vs->vs_initialize_bytes_done =
vd->vdev_initialize_bytes_done;
vs->vs_initialize_bytes_est =
vd->vdev_initialize_bytes_est;
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
/*
* Report manual TRIM progress. Since we don't have
* the manual TRIM locks held, this is only an
* estimate (although a fairly accurate one).
*/
vs->vs_trim_notsup = !vd->vdev_has_trim;
vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
vs->vs_trim_state = vd->vdev_trim_state;
vs->vs_trim_action_time = vd->vdev_trim_action_time;
/* Set when there is a deferred resilver. */
vs->vs_resilver_deferred = vd->vdev_resilver_deferred;
}
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_configured_ashift = vd->vdev_top != NULL
? vd->vdev_top->vdev_ashift : vd->vdev_ashift;
vs->vs_logical_ashift = vd->vdev_logical_ashift;
vs->vs_physical_ashift = vd->vdev_physical_ashift;
/*
* Report fragmentation and rebuild progress for top-level,
* non-auxiliary, concrete devices.
*/
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
vdev_is_concrete(vd)) {
/*
* The vdev fragmentation rating doesn't take into
* account the embedded slog metaslab (vdev_log_mg).
* Since it's only one metaslab, it would have a tiny
* impact on the overall fragmentation.
*/
vs->vs_fragmentation = (vd->vdev_mg != NULL) ?
vd->vdev_mg->mg_fragmentation : 0;
}
}
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
/*
* Repair is the result of a resilver issued by the
* scan thread (spa_sync).
*/
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dsl_scan_phys_t *scn_phys = &scn->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
/*
* Repair is the result of a rebuild issued by the
* rebuild thread (vdev_rebuild_thread). To avoid
* double counting repaired bytes the virtual dRAID
* spare vdev is excluded from the processed bytes.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
vdev_t *tvd = vd->vdev_top;
vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
uint64_t *rebuilt = &vrp->vrp_bytes_rebuilt;
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
atomic_add_64(rebuilt, psize);
}
vs->vs_rebuild_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
zio_type_t vs_type = type;
zio_priority_t priority = zio->io_priority;
/*
* TRIM ops and bytes are reported to user space as
* ZIO_TYPE_IOCTL. This is done to preserve the
* vdev_stat_t structure layout for user space.
*/
if (type == ZIO_TYPE_TRIM)
vs_type = ZIO_TYPE_IOCTL;
/*
* Solely for the purposes of 'zpool iostat -lqrw'
* reporting, use the priority to categorize the IO.
* Only the following are reported to user space:
*
* ZIO_PRIORITY_SYNC_READ,
* ZIO_PRIORITY_SYNC_WRITE,
* ZIO_PRIORITY_ASYNC_READ,
* ZIO_PRIORITY_ASYNC_WRITE,
* ZIO_PRIORITY_SCRUB,
* ZIO_PRIORITY_TRIM.
*/
if (priority == ZIO_PRIORITY_REBUILD) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_SCRUB);
} else if (priority == ZIO_PRIORITY_INITIALIZING) {
ASSERT3U(type, ==, ZIO_TYPE_WRITE);
priority = ZIO_PRIORITY_ASYNC_WRITE;
} else if (priority == ZIO_PRIORITY_REMOVAL) {
priority = ((type == ZIO_TYPE_WRITE) ?
ZIO_PRIORITY_ASYNC_WRITE :
ZIO_PRIORITY_ASYNC_READ);
}
vs->vs_ops[vs_type]++;
vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[priority]
[RQ_HISTO(zio->io_size)]++;
}
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
if (type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
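*
* For example, if a scrub-induced repair of a block born in txg 1000
* fails while spa_syncing_txg(spa) is 1500, the txg-1000 entry is added
* to DTL_SCRUB immediately but the change is dirtied against (and synced
* out in) txg 1500 rather than 1000.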
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
int64_t
vdev_deflated_space(vdev_t *vd, int64_t space)
{
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
}
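/*
 * A worked example, assuming vdev_set_deflate_ratio() derives the ratio
 * from a 128 KiB block as in the rest of this file: a mirror or plain
 * disk gets a ratio of 512 (SPA_MINBLOCKSIZE), so the deflated space
 * equals the raw space, while a RAID-Z vdev gets a smaller ratio that
 * scales the space down by roughly psize/asize of that 128 KiB block,
 * discounting the parity overhead.
 */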
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
+ (void) defer_delta;
int64_t dspace_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
dspace_delta = vdev_deflated_space(vd, space_delta);
mutex_enter(&vd->vdev_stat_lock);
/* ensure we won't underflow */
if (alloc_delta < 0) {
ASSERT3U(vd->vdev_stat.vs_alloc, >=, -alloc_delta);
}
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
/* every class but log contributes to root space stats */
if (vd->vdev_mg != NULL && !vd->vdev_islog) {
ASSERT(!vd->vdev_isl2cache);
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
/* Note: metaslab_class_space_update moved to metaslab_space_update */
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
 * Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
}
}
}
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
if (!list_link_active(&vd->vdev_state_dirty_node) &&
vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
* Don't factor holes or indirect vdevs into the
* decision.
*/
if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
 * Since the vdev_offline() code path is already in an offline
 * state, we can miss a statechange event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also let this be if
* it is one of our special test online cases, which is only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
(void) zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
 */
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
boolean_t
vdev_children_are_offline(vdev_t *vd)
{
ASSERT(!vd->vdev_ops->vdev_op_leaf);
for (uint64_t i = 0; i < vd->vdev_children; i++) {
if (vd->vdev_child[i]->vdev_state != VDEV_STATE_OFFLINE)
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
return (B_FALSE);
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
boolean_t
vdev_is_concrete(vdev_t *vd)
{
vdev_ops_t *ops = vd->vdev_ops;
if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
ops == &vdev_missing_ops || ops == &vdev_root_ops) {
return (B_FALSE);
} else {
return (B_TRUE);
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(vdev_is_concrete(vd));
vdev_set_deflate_ratio(vd);
if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
vdev_is_concrete(vd)) {
vdev_metaslab_group_create(vd);
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (avl_numnodes(&vq->vq_active_tree) > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %lu active IOs",
vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
/*
 * Look at the head of all the pending queues;
 * if any I/O has been outstanding for longer than
 * the spa_deadman_synctime, invoke the deadman logic.
*/
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
void
vdev_defer_resilver(vdev_t *vd)
{
ASSERT(vd->vdev_ops->vdev_op_leaf);
vd->vdev_resilver_deferred = B_TRUE;
vd->vdev_spa->spa_resilver_deferred = B_TRUE;
}
/*
* Clears the resilver deferred flag on all leaf devs under vd. Returns
* B_TRUE if we have devices that need to be resilvered and are available to
* accept resilver I/Os.
*/
boolean_t
vdev_clear_resilver_deferred(vdev_t *vd, dmu_tx_t *tx)
{
boolean_t resilver_needed = B_FALSE;
spa_t *spa = vd->vdev_spa;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
resilver_needed |= vdev_clear_resilver_deferred(cvd, tx);
}
if (vd == spa->spa_root_vdev &&
spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
vdev_config_dirty(vd);
spa->spa_resilver_deferred = B_FALSE;
return (resilver_needed);
}
if (!vdev_is_concrete(vd) || vd->vdev_aux ||
!vd->vdev_ops->vdev_op_leaf)
return (resilver_needed);
vd->vdev_resilver_deferred = B_FALSE;
return (!vdev_is_dead(vd) && !vd->vdev_offline &&
vdev_resilver_needed(vd, NULL, NULL));
}
boolean_t
vdev_xlate_is_empty(range_seg64_t *rs)
{
return (rs->rs_start == rs->rs_end);
}
/*
* Translate a logical range to the first contiguous physical range for the
* specified vdev_t. This function is initially called with a leaf vdev and
* will walk each parent vdev until it reaches a top-level vdev. Once the
* top-level is reached the physical range is initialized and the recursive
* function begins to unwind. As it unwinds it calls the parent's vdev
* specific translation function to do the real conversion.
*/
void
vdev_xlate(vdev_t *vd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
/*
* Walk up the vdev tree
*/
if (vd != vd->vdev_top) {
vdev_xlate(vd->vdev_parent, logical_rs, physical_rs,
remain_rs);
} else {
/*
* We've reached the top-level vdev, initialize the physical
* range to the logical range and set an empty remaining
* range then start to unwind.
*/
physical_rs->rs_start = logical_rs->rs_start;
physical_rs->rs_end = logical_rs->rs_end;
remain_rs->rs_start = logical_rs->rs_start;
remain_rs->rs_end = logical_rs->rs_start;
return;
}
vdev_t *pvd = vd->vdev_parent;
ASSERT3P(pvd, !=, NULL);
ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
/*
* As this recursive function unwinds, translate the logical
* range into its physical and any remaining components by calling
* the vdev specific translate function.
*/
range_seg64_t intermediate = { 0 };
pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate, remain_rs);
physical_rs->rs_start = intermediate.rs_start;
physical_rs->rs_end = intermediate.rs_end;
}
void
vdev_xlate_walk(vdev_t *vd, const range_seg64_t *logical_rs,
vdev_xlate_func_t *func, void *arg)
{
range_seg64_t iter_rs = *logical_rs;
range_seg64_t physical_rs;
range_seg64_t remain_rs;
while (!vdev_xlate_is_empty(&iter_rs)) {
vdev_xlate(vd, &iter_rs, &physical_rs, &remain_rs);
/*
* With raidz and dRAID, it's possible that the logical range
 * does not live on this leaf vdev. Only call the provided
 * function when the physical size is non-zero.
*/
if (!vdev_xlate_is_empty(&physical_rs))
func(arg, &physical_rs);
iter_rs = remain_rs;
}
}
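/*
 * A minimal sketch of a vdev_xlate_walk() consumer (illustrative only;
 * vdev_xlate_sum_cb is a hypothetical name, not a callback used in this
 * file). The callback is invoked once per contiguous physical range and
 * here simply totals the physical bytes covered by the logical range:
 *
 *	static void
 *	vdev_xlate_sum_cb(void *arg, range_seg64_t *physical_rs)
 *	{
 *		uint64_t *sum = arg;
 *
 *		*sum += physical_rs->rs_end - physical_rs->rs_start;
 *	}
 *
 *	uint64_t sum = 0;
 *	vdev_xlate_walk(vd, &logical_rs, vdev_xlate_sum_cb, &sum);
 */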
/*
* Look at the vdev tree and determine whether any devices are currently being
* replaced.
*/
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);
if (vdev->vdev_ops == &vdev_replacing_ops)
return (B_TRUE);
/*
* A 'spare' vdev indicates that we have a replace in progress, unless
* it has exactly two children, and the second, the hot spare, has
* finished being resilvered.
*/
if (vdev->vdev_ops == &vdev_spare_ops && (vdev->vdev_children > 2 ||
!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING)))
return (B_TRUE);
for (int i = 0; i < vdev->vdev_children; i++) {
if (vdev_replace_in_progress(vdev->vdev_child[i]))
return (B_TRUE);
}
return (B_FALSE);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW,
"Target number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW,
"Default limit for metaslab size");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW,
"Minimum number of metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW,
"Practical upper limit of total metaslabs per top-level vdev");
ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
"Rate limit slow IO (delay) events to this many per second");
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
"Rate limit checksum events to this many checksum errors per second "
"(do not set below zed threshold).");
ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
"Ignore errors during resilver/scrub");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
"Bypass vdev_validate()");
ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
"Disable cache flushes");
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, INT, ZMOD_RW,
"Minimum number of metaslabs required to dedicate one for log blocks");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
param_set_min_auto_ashift, param_get_ulong, ZMOD_RW,
"Minimum ashift used when creating new top-level vdevs");
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
param_set_max_auto_ashift, param_get_ulong, ZMOD_RW,
"Maximum ashift used when optimizing for logical -> physical sector "
"size on new top-level vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_draid.c b/sys/contrib/openzfs/module/zfs/vdev_draid.c
index b8f82d52e8f0..db87e69f2057 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_draid.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_draid.c
@@ -1,2784 +1,2831 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_rebuild.h>
#include <sys/abd.h>
#include <sys/zio.h>
#include <sys/nvpair.h>
#include <sys/zio_checksum.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <zfs_fletcher.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_draid_io_verify() */
#endif
/*
* dRAID is a distributed spare implementation for ZFS. A dRAID vdev is
* comprised of multiple raidz redundancy groups which are spread over the
* dRAID children. To ensure an even distribution, and avoid hot spots, a
* permutation mapping is applied to the order of the dRAID children.
* This mixing effectively distributes the parity columns evenly over all
* of the disks in the dRAID.
*
 * This is beneficial because it means that when resilvering, all of the
 * disks can participate, thereby increasing the available IOPs and bandwidth.
 * Furthermore, by reserving a small fraction of each child's total capacity,
 * virtual distributed spare disks can be created. These spares similarly
* benefit from the performance gains of spanning all of the children. The
 * consequence is that resilvering to a distributed spare can
 * substantially reduce the time required to restore full parity to a pool
 * with a failed disk.
*
* === dRAID group layout ===
*
* First, let's define a "row" in the configuration to be a 16M chunk from
* each physical drive at the same offset. This is the minimum allowable
* size since it must be possible to store a full 16M block when there is
* only a single data column. Next, we define a "group" to be a set of
* sequential disks containing both the parity and data columns. We allow
* groups to span multiple rows in order to align any group size to any
* number of physical drives. Finally, a "slice" is comprised of the rows
* which contain the target number of groups. The permutation mappings
* are applied in a round robin fashion to each slice.
*
* Given D+P drives in a group (including parity drives) and C-S physical
* drives (not including the spare drives), we can distribute the groups
* across R rows without remainder by selecting the least common multiple
* of D+P and C-S as the number of groups; i.e. ngroups = LCM(D+P, C-S).
*
* In the example below, there are C=14 physical drives in the configuration
* with S=2 drives worth of spare capacity. Each group has a width of 9
* which includes D=8 data and P=1 parity drive. There are 4 groups and
* 3 rows per slice. Each group has a size of 144M (16M * 9) and a slice
* size is 576M (144M * 4). When allocating from a dRAID each group is
 * filled before moving on to the next as shown in slice0 below.
*
* data disks (8 data + 1 parity) spares (2)
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* ^ | 2 | 6 | 1 | 11| 4 | 0 | 7 | 10| 8 | 9 | 13| 5 | 12| 3 | device map 0
* | +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | | group 0 | group 1..| |
* | +-----------------------------------+-----------+-------|
* | | 0 1 2 3 4 5 6 7 8 | 36 37 38| | r
* | | 9 10 11 12 13 14 15 16 17| 45 46 47| | o
* | | 18 19 20 21 22 23 24 25 26| 54 55 56| | w
 * | | 27 28 29 30 31 32 33 34 35| 63 64 65| | 0
* s +-----------------------+-----------------------+-------+
* l | ..group 1 | group 2.. | |
* i +-----------------------+-----------------------+-------+
* c | 39 40 41 42 43 44| 72 73 74 75 76 77| | r
* e | 48 49 50 51 52 53| 81 82 83 84 85 86| | o
* 0 | 57 58 59 60 61 62| 90 91 92 93 94 95| | w
* | 66 67 68 69 70 71| 99 100 101 102 103 104| | 1
* | +-----------+-----------+-----------------------+-------+
* | |..group 2 | group 3 | |
* | +-----------+-----------+-----------------------+-------+
* | | 78 79 80|108 109 110 111 112 113 114 115 116| | r
* | | 87 88 89|117 118 119 120 121 122 123 124 125| | o
* | | 96 97 98|126 127 128 129 130 131 132 133 134| | w
* v |105 106 107|135 136 137 138 139 140 141 142 143| | 2
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | 9 | 11| 12| 2 | 4 | 1 | 3 | 0 | 10| 13| 8 | 5 | 6 | 7 | device map 1
* s +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* l | group 4 | group 5..| | row 3
* i +-----------------------+-----------+-----------+-------|
* c | ..group 5 | group 6.. | | row 4
* e +-----------+-----------+-----------------------+-------+
* 1 |..group 6 | group 7 | | row 5
* +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* | 3 | 5 | 10| 8 | 6 | 11| 12| 0 | 2 | 4 | 7 | 1 | 9 | 13| device map 2
* s +===+===+===+===+===+===+===+===+===+===+===+===+===+===+
* l | group 8 | group 9..| | row 6
* i +-----------------------------------------------+-------|
* c | ..group 9 | group 10.. | | row 7
* e +-----------------------+-----------------------+-------+
* 2 |..group 10 | group 11 | | row 8
* +-----------+-----------------------------------+-------+
*
* This layout has several advantages over requiring that each row contain
* a whole number of groups.
*
* 1. The group count is not a relevant parameter when defining a dRAID
* layout. Only the group width is needed, and *all* groups will have
* the desired size.
*
* 2. All possible group widths (<= physical disk count) can be supported.
*
* 3. The logic within vdev_draid.c is simplified when the group width is
* the same for all groups (although some of the logic around computing
* permutation numbers and drive offsets is more complicated).
*
* N.B. The following array describes all valid dRAID permutation maps.
* Each row is used to generate a permutation map for a different number
* of children from a unique seed. The seeds were generated and carefully
* evaluated by the 'draid' utility in order to provide balanced mappings.
* In addition to the seed a checksum of the in-memory mapping is stored
* for verification.
*
* The imbalance ratio of a given failure (e.g. 5 disks wide, child 3 failed,
* with a given permutation map) is the ratio of the amounts of I/O that will
* be sent to the least and most busy disks when resilvering. The average
* imbalance ratio (of a given number of disks and permutation map) is the
* average of the ratios of all possible single and double disk failures.
*
* In order to achieve a low imbalance ratio the number of permutations in
* the mapping must be significantly larger than the number of children.
* For dRAID the number of permutations has been limited to 512 to minimize
* the map size. This does result in a gradually increasing imbalance ratio
* as seen in the table below. Increasing the number of permutations for
* larger child counts would reduce the imbalance ratio. However, in practice
* when there are a large number of children each child is responsible for
* fewer total IOs so it's less of a concern.
*
* Note these values are hard coded and must never be changed. Existing
* pools depend on the same mapping always being generated in order to
* read and write from the correct locations. Any change would make
* existing pools completely inaccessible.
*/
static const draid_map_t draid_maps[VDEV_DRAID_MAX_MAPS] = {
{ 2, 256, 0x89ef3dabbcc7de37, 0x00000000433d433d }, /* 1.000 */
{ 3, 256, 0x89a57f3de98121b4, 0x00000000bcd8b7b5 }, /* 1.000 */
{ 4, 256, 0xc9ea9ec82340c885, 0x00000001819d7c69 }, /* 1.000 */
{ 5, 256, 0xf46733b7f4d47dfd, 0x00000002a1648d74 }, /* 1.010 */
{ 6, 256, 0x88c3c62d8585b362, 0x00000003d3b0c2c4 }, /* 1.031 */
{ 7, 256, 0x3a65d809b4d1b9d5, 0x000000055c4183ee }, /* 1.043 */
{ 8, 256, 0xe98930e3c5d2e90a, 0x00000006edfb0329 }, /* 1.059 */
{ 9, 256, 0x5a5430036b982ccb, 0x00000008ceaf6934 }, /* 1.056 */
{ 10, 256, 0x92bf389e9eadac74, 0x0000000b26668c09 }, /* 1.072 */
{ 11, 256, 0x74ccebf1dcf3ae80, 0x0000000dd691358c }, /* 1.083 */
{ 12, 256, 0x8847e41a1a9f5671, 0x00000010a0c63c8e }, /* 1.097 */
{ 13, 256, 0x7481b56debf0e637, 0x0000001424121fe4 }, /* 1.100 */
{ 14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 }, /* 1.121 */
{ 15, 256, 0x34c49545a2ee7f01, 0x0000001a6028efd6 }, /* 1.103 */
{ 16, 256, 0xb85f4fa81a7698f7, 0x0000001e95ff5e66 }, /* 1.111 */
{ 17, 256, 0x6353e47b7e47aba0, 0x00000021a81fa0fe }, /* 1.133 */
{ 18, 256, 0xaa549746b1cbb81c, 0x00000026f02494c9 }, /* 1.131 */
{ 19, 256, 0x892e343f2f31d690, 0x00000029eb392835 }, /* 1.130 */
{ 20, 256, 0x76914824db98cc3f, 0x0000003004f31a7c }, /* 1.141 */
{ 21, 256, 0x4b3cbabf9cfb1d0f, 0x00000036363a2408 }, /* 1.139 */
{ 22, 256, 0xf45c77abb4f035d4, 0x00000038dd0f3e84 }, /* 1.150 */
{ 23, 256, 0x5e18bd7f3fd4baf4, 0x0000003f0660391f }, /* 1.174 */
{ 24, 256, 0xa7b3a4d285d6503b, 0x000000443dfc9ff6 }, /* 1.168 */
{ 25, 256, 0x56ac7dd967521f5a, 0x0000004b03a87eb7 }, /* 1.180 */
{ 26, 256, 0x3a42dfda4eb880f7, 0x000000522c719bba }, /* 1.226 */
{ 27, 256, 0xd200d2fc6b54bf60, 0x0000005760b4fdf5 }, /* 1.228 */
{ 28, 256, 0xc52605bbd486c546, 0x0000005e00d8f74c }, /* 1.217 */
{ 29, 256, 0xc761779e63cd762f, 0x00000067be3cd85c }, /* 1.239 */
{ 30, 256, 0xca577b1e07f85ca5, 0x0000006f5517f3e4 }, /* 1.238 */
{ 31, 256, 0xfd50a593c518b3d4, 0x0000007370e7778f }, /* 1.273 */
{ 32, 512, 0xc6c87ba5b042650b, 0x000000f7eb08a156 }, /* 1.191 */
{ 33, 512, 0xc3880d0c9d458304, 0x0000010734b5d160 }, /* 1.199 */
{ 34, 512, 0xe920927e4d8b2c97, 0x00000118c1edbce0 }, /* 1.195 */
{ 35, 512, 0x8da7fcda87bde316, 0x0000012a3e9f9110 }, /* 1.201 */
{ 36, 512, 0xcf09937491514a29, 0x0000013bd6a24bef }, /* 1.194 */
{ 37, 512, 0x9b5abbf345cbd7cc, 0x0000014b9d90fac3 }, /* 1.237 */
{ 38, 512, 0x506312a44668d6a9, 0x0000015e1b5f6148 }, /* 1.242 */
{ 39, 512, 0x71659ede62b4755f, 0x00000173ef029bcd }, /* 1.231 */
{ 40, 512, 0xa7fde73fb74cf2d7, 0x000001866fb72748 }, /* 1.233 */
{ 41, 512, 0x19e8b461a1dea1d3, 0x000001a046f76b23 }, /* 1.271 */
{ 42, 512, 0x031c9b868cc3e976, 0x000001afa64c49d3 }, /* 1.263 */
{ 43, 512, 0xbaa5125faa781854, 0x000001c76789e278 }, /* 1.270 */
{ 44, 512, 0x4ed55052550d721b, 0x000001d800ccd8eb }, /* 1.281 */
{ 45, 512, 0x0fd63ddbdff90677, 0x000001f08ad59ed2 }, /* 1.282 */
{ 46, 512, 0x36d66546de7fdd6f, 0x000002016f09574b }, /* 1.286 */
{ 47, 512, 0x99f997e7eafb69d7, 0x0000021e42e47cb6 }, /* 1.329 */
{ 48, 512, 0xbecd9c2571312c5d, 0x000002320fe2872b }, /* 1.286 */
{ 49, 512, 0xd97371329e488a32, 0x0000024cd73f2ca7 }, /* 1.322 */
{ 50, 512, 0x30e9b136670749ee, 0x000002681c83b0e0 }, /* 1.335 */
{ 51, 512, 0x11ad6bc8f47aaeb4, 0x0000027e9261b5d5 }, /* 1.305 */
{ 52, 512, 0x68e445300af432c1, 0x0000029aa0eb7dbf }, /* 1.330 */
{ 53, 512, 0x910fb561657ea98c, 0x000002b3dca04853 }, /* 1.365 */
{ 54, 512, 0xd619693d8ce5e7a5, 0x000002cc280e9c97 }, /* 1.334 */
{ 55, 512, 0x24e281f564dbb60a, 0x000002e9fa842713 }, /* 1.364 */
{ 56, 512, 0x947a7d3bdaab44c5, 0x000003046680f72e }, /* 1.374 */
{ 57, 512, 0x2d44fec9c093e0de, 0x00000324198ba810 }, /* 1.363 */
{ 58, 512, 0x87743c272d29bb4c, 0x0000033ec48c9ac9 }, /* 1.401 */
{ 59, 512, 0x96aa3b6f67f5d923, 0x0000034faead902c }, /* 1.392 */
{ 60, 512, 0x94a4f1faf520b0d3, 0x0000037d713ab005 }, /* 1.360 */
{ 61, 512, 0xb13ed3a272f711a2, 0x00000397368f3cbd }, /* 1.396 */
{ 62, 512, 0x3b1b11805fa4a64a, 0x000003b8a5e2840c }, /* 1.453 */
{ 63, 512, 0x4c74caad9172ba71, 0x000003d4be280290 }, /* 1.437 */
{ 64, 512, 0x035ff643923dd29e, 0x000003fad6c355e1 }, /* 1.402 */
{ 65, 512, 0x768e9171b11abd3c, 0x0000040eb07fed20 }, /* 1.459 */
{ 66, 512, 0x75880e6f78a13ddd, 0x000004433d6acf14 }, /* 1.423 */
{ 67, 512, 0x910b9714f698a877, 0x00000451ea65d5db }, /* 1.447 */
{ 68, 512, 0x87f5db6f9fdcf5c7, 0x000004732169e3f7 }, /* 1.450 */
{ 69, 512, 0x836d4968fbaa3706, 0x000004954068a380 }, /* 1.455 */
{ 70, 512, 0xc567d73a036421ab, 0x000004bd7cb7bd3d }, /* 1.463 */
{ 71, 512, 0x619df40f240b8fed, 0x000004e376c2e972 }, /* 1.463 */
{ 72, 512, 0x42763a680d5bed8e, 0x000005084275c680 }, /* 1.452 */
{ 73, 512, 0x5866f064b3230431, 0x0000052906f2c9ab }, /* 1.498 */
{ 74, 512, 0x9fa08548b1621a44, 0x0000054708019247 }, /* 1.526 */
{ 75, 512, 0xb6053078ce0fc303, 0x00000572cc5c72b0 }, /* 1.491 */
{ 76, 512, 0x4a7aad7bf3890923, 0x0000058e987bc8e9 }, /* 1.470 */
{ 77, 512, 0xe165613fd75b5a53, 0x000005c20473a211 }, /* 1.527 */
{ 78, 512, 0x3ff154ac878163a6, 0x000005d659194bf3 }, /* 1.509 */
{ 79, 512, 0x24b93ade0aa8a532, 0x0000060a201c4f8e }, /* 1.569 */
{ 80, 512, 0xc18e2d14cd9bb554, 0x0000062c55cfe48c }, /* 1.555 */
{ 81, 512, 0x98cc78302feb58b6, 0x0000066656a07194 }, /* 1.509 */
{ 82, 512, 0xc6c5fd5a2abc0543, 0x0000067cff94fbf8 }, /* 1.596 */
{ 83, 512, 0xa7962f514acbba21, 0x000006ab7b5afa2e }, /* 1.568 */
{ 84, 512, 0xba02545069ddc6dc, 0x000006d19861364f }, /* 1.541 */
{ 85, 512, 0x447c73192c35073e, 0x000006fce315ce35 }, /* 1.623 */
{ 86, 512, 0x48beef9e2d42b0c2, 0x00000720a8e38b6b }, /* 1.620 */
{ 87, 512, 0x4874cf98541a35e0, 0x00000758382a2273 }, /* 1.597 */
{ 88, 512, 0xad4cf8333a31127a, 0x00000781e1651b1b }, /* 1.575 */
{ 89, 512, 0x47ae4859d57888c1, 0x000007b27edbe5bc }, /* 1.627 */
{ 90, 512, 0x06f7723cfe5d1891, 0x000007dc2a96d8eb }, /* 1.596 */
{ 91, 512, 0xd4e44218d660576d, 0x0000080ac46f02d5 }, /* 1.622 */
{ 92, 512, 0x7066702b0d5be1f2, 0x00000832c96d154e }, /* 1.695 */
{ 93, 512, 0x011209b4f9e11fb9, 0x0000085eefda104c }, /* 1.605 */
{ 94, 512, 0x47ffba30a0b35708, 0x00000899badc32dc }, /* 1.625 */
{ 95, 512, 0x1a95a6ac4538aaa8, 0x000008b6b69a42b2 }, /* 1.687 */
{ 96, 512, 0xbda2b239bb2008eb, 0x000008f22d2de38a }, /* 1.621 */
{ 97, 512, 0x7ffa0bea90355c6c, 0x0000092e5b23b816 }, /* 1.699 */
{ 98, 512, 0x1d56ba34be426795, 0x0000094f482e5d1b }, /* 1.688 */
{ 99, 512, 0x0aa89d45c502e93d, 0x00000977d94a98ce }, /* 1.642 */
{ 100, 512, 0x54369449f6857774, 0x000009c06c9b34cc }, /* 1.683 */
{ 101, 512, 0xf7d4dd8445b46765, 0x000009e5dc542259 }, /* 1.755 */
{ 102, 512, 0xfa8866312f169469, 0x00000a16b54eae93 }, /* 1.692 */
{ 103, 512, 0xd8a5aea08aef3ff9, 0x00000a381d2cbfe7 }, /* 1.747 */
{ 104, 512, 0x66bcd2c3d5f9ef0e, 0x00000a8191817be7 }, /* 1.751 */
{ 105, 512, 0x3fb13a47a012ec81, 0x00000ab562b9a254 }, /* 1.751 */
{ 106, 512, 0x43100f01c9e5e3ca, 0x00000aeee84c185f }, /* 1.726 */
{ 107, 512, 0xca09c50ccee2d054, 0x00000b1c359c047d }, /* 1.788 */
{ 108, 512, 0xd7176732ac503f9b, 0x00000b578bc52a73 }, /* 1.740 */
{ 109, 512, 0xed206e51f8d9422d, 0x00000b8083e0d960 }, /* 1.780 */
{ 110, 512, 0x17ead5dc6ba0dcd6, 0x00000bcfb1a32ca8 }, /* 1.836 */
{ 111, 512, 0x5f1dc21e38a969eb, 0x00000c0171becdd6 }, /* 1.778 */
{ 112, 512, 0xddaa973de33ec528, 0x00000c3edaba4b95 }, /* 1.831 */
{ 113, 512, 0x2a5eccd7735a3630, 0x00000c630664e7df }, /* 1.825 */
{ 114, 512, 0xafcccee5c0b71446, 0x00000cb65392f6e4 }, /* 1.826 */
{ 115, 512, 0x8fa30c5e7b147e27, 0x00000cd4db391e55 }, /* 1.843 */
{ 116, 512, 0x5afe0711fdfafd82, 0x00000d08cb4ec35d }, /* 1.826 */
{ 117, 512, 0x533a6090238afd4c, 0x00000d336f115d1b }, /* 1.803 */
{ 118, 512, 0x90cf11b595e39a84, 0x00000d8e041c2048 }, /* 1.857 */
{ 119, 512, 0x0d61a3b809444009, 0x00000dcb798afe35 }, /* 1.877 */
{ 120, 512, 0x7f34da0f54b0d114, 0x00000df3922664e1 }, /* 1.849 */
{ 121, 512, 0xa52258d5b72f6551, 0x00000e4d37a9872d }, /* 1.867 */
{ 122, 512, 0xc1de54d7672878db, 0x00000e6583a94cf6 }, /* 1.978 */
{ 123, 512, 0x1d03354316a414ab, 0x00000ebffc50308d }, /* 1.947 */
{ 124, 512, 0xcebdcc377665412c, 0x00000edee1997cea }, /* 1.865 */
{ 125, 512, 0x4ddd4c04b1a12344, 0x00000f21d64b373f }, /* 1.881 */
{ 126, 512, 0x64fc8f94e3973658, 0x00000f8f87a8896b }, /* 1.882 */
{ 127, 512, 0x68765f78034a334e, 0x00000fb8fe62197e }, /* 1.867 */
{ 128, 512, 0xaf36b871a303e816, 0x00000fec6f3afb1e }, /* 1.972 */
{ 129, 512, 0x2a4cbf73866c3a28, 0x00001027febfe4e5 }, /* 1.896 */
{ 130, 512, 0x9cb128aacdcd3b2f, 0x0000106aa8ac569d }, /* 1.965 */
{ 131, 512, 0x5511d41c55869124, 0x000010bbd755ddf1 }, /* 1.963 */
{ 132, 512, 0x42f92461937f284a, 0x000010fb8bceb3b5 }, /* 1.925 */
{ 133, 512, 0xe2d89a1cf6f1f287, 0x0000114cf5331e34 }, /* 1.862 */
{ 134, 512, 0xdc631a038956200e, 0x0000116428d2adc5 }, /* 2.042 */
{ 135, 512, 0xb2e5ac222cd236be, 0x000011ca88e4d4d2 }, /* 1.935 */
{ 136, 512, 0xbc7d8236655d88e7, 0x000011e39cb94e66 }, /* 2.005 */
{ 137, 512, 0x073e02d88d2d8e75, 0x0000123136c7933c }, /* 2.041 */
{ 138, 512, 0x3ddb9c3873166be0, 0x00001280e4ec6d52 }, /* 1.997 */
{ 139, 512, 0x7d3b1a845420e1b5, 0x000012c2e7cd6a44 }, /* 1.996 */
{ 140, 512, 0x60102308aa7b2a6c, 0x000012fc490e6c7d }, /* 2.053 */
{ 141, 512, 0xdb22bb2f9eb894aa, 0x00001343f5a85a1a }, /* 1.971 */
{ 142, 512, 0xd853f879a13b1606, 0x000013bb7d5f9048 }, /* 2.018 */
{ 143, 512, 0x001620a03f804b1d, 0x000013e74cc794fd }, /* 1.961 */
{ 144, 512, 0xfdb52dda76fbf667, 0x00001442d2f22480 }, /* 2.046 */
{ 145, 512, 0xa9160110f66e24ff, 0x0000144b899f9dbb }, /* 1.968 */
{ 146, 512, 0x77306a30379ae03b, 0x000014cb98eb1f81 }, /* 2.143 */
{ 147, 512, 0x14f5985d2752319d, 0x000014feab821fc9 }, /* 2.064 */
{ 148, 512, 0xa4b8ff11de7863f8, 0x0000154a0e60b9c9 }, /* 2.023 */
{ 149, 512, 0x44b345426455c1b3, 0x000015999c3c569c }, /* 2.136 */
{ 150, 512, 0x272677826049b46c, 0x000015c9697f4b92 }, /* 2.063 */
{ 151, 512, 0x2f9216e2cd74fe40, 0x0000162b1f7bbd39 }, /* 1.974 */
{ 152, 512, 0x706ae3e763ad8771, 0x00001661371c55e1 }, /* 2.210 */
{ 153, 512, 0xf7fd345307c2480e, 0x000016e251f28b6a }, /* 2.006 */
{ 154, 512, 0x6e94e3d26b3139eb, 0x000016f2429bb8c6 }, /* 2.193 */
{ 155, 512, 0x5458bbfbb781fcba, 0x0000173efdeca1b9 }, /* 2.163 */
{ 156, 512, 0xa80e2afeccd93b33, 0x000017bfdcb78adc }, /* 2.046 */
{ 157, 512, 0x1e4ccbb22796cf9d, 0x00001826fdcc39c9 }, /* 2.084 */
{ 158, 512, 0x8fba4b676aaa3663, 0x00001841a1379480 }, /* 2.264 */
{ 159, 512, 0xf82b843814b315fa, 0x000018886e19b8a3 }, /* 2.074 */
{ 160, 512, 0x7f21e920ecf753a3, 0x0000191812ca0ea7 }, /* 2.282 */
{ 161, 512, 0x48bb8ea2c4caa620, 0x0000192f310faccf }, /* 2.148 */
{ 162, 512, 0x5cdb652b4952c91b, 0x0000199e1d7437c7 }, /* 2.355 */
{ 163, 512, 0x6ac1ba6f78c06cd4, 0x000019cd11f82c70 }, /* 2.164 */
{ 164, 512, 0x9faf5f9ca2669a56, 0x00001a18d5431f6a }, /* 2.393 */
{ 165, 512, 0xaa57e9383eb01194, 0x00001a9e7d253d85 }, /* 2.178 */
{ 166, 512, 0x896967bf495c34d2, 0x00001afb8319b9fc }, /* 2.334 */
{ 167, 512, 0xdfad5f05de225f1b, 0x00001b3a59c3093b }, /* 2.266 */
{ 168, 512, 0xfd299a99f9f2abdd, 0x00001bb6f1a10799 }, /* 2.304 */
{ 169, 512, 0xdda239e798fe9fd4, 0x00001bfae0c9692d }, /* 2.218 */
{ 170, 512, 0x5fca670414a32c3e, 0x00001c22129dbcff }, /* 2.377 */
{ 171, 512, 0x1bb8934314b087de, 0x00001c955db36cd0 }, /* 2.155 */
{ 172, 512, 0xd96394b4b082200d, 0x00001cfc8619b7e6 }, /* 2.404 */
{ 173, 512, 0xb612a7735b1c8cbc, 0x00001d303acdd585 }, /* 2.205 */
{ 174, 512, 0x28e7430fe5875fe1, 0x00001d7ed5b3697d }, /* 2.359 */
{ 175, 512, 0x5038e89efdd981b9, 0x00001dc40ec35c59 }, /* 2.158 */
{ 176, 512, 0x075fd78f1d14db7c, 0x00001e31c83b4a2b }, /* 2.614 */
{ 177, 512, 0xc50fafdb5021be15, 0x00001e7cdac82fbc }, /* 2.239 */
{ 178, 512, 0xe6dc7572ce7b91c7, 0x00001edd8bb454fc }, /* 2.493 */
{ 179, 512, 0x21f7843e7beda537, 0x00001f3a8e019d6c }, /* 2.327 */
{ 180, 512, 0xc83385e20b43ec82, 0x00001f70735ec137 }, /* 2.231 */
{ 181, 512, 0xca818217dddb21fd, 0x0000201ca44c5a3c }, /* 2.237 */
{ 182, 512, 0xe6035defea48f933, 0x00002038e3346658 }, /* 2.691 */
{ 183, 512, 0x47262a4f953dac5a, 0x000020c2e554314e }, /* 2.170 */
{ 184, 512, 0xe24c7246260873ea, 0x000021197e618d64 }, /* 2.600 */
{ 185, 512, 0xeef6b57c9b58e9e1, 0x0000217ea48ecddc }, /* 2.391 */
{ 186, 512, 0x2becd3346e386142, 0x000021c496d4a5f9 }, /* 2.677 */
{ 187, 512, 0x63c6207bdf3b40a3, 0x0000220e0f2eec0c }, /* 2.410 */
{ 188, 512, 0x3056ce8989767d4b, 0x0000228eb76cd137 }, /* 2.776 */
{ 189, 512, 0x91af61c307cee780, 0x000022e17e2ea501 }, /* 2.266 */
{ 190, 512, 0xda359da225f6d54f, 0x00002358a2debc19 }, /* 2.717 */
{ 191, 512, 0x0a5f7a2a55607ba0, 0x0000238a79dac18c }, /* 2.474 */
{ 192, 512, 0x27bb75bf5224638a, 0x00002403a58e2351 }, /* 2.673 */
{ 193, 512, 0x1ebfdb94630f5d0f, 0x00002492a10cb339 }, /* 2.420 */
{ 194, 512, 0x6eae5e51d9c5f6fb, 0x000024ce4bf98715 }, /* 2.898 */
{ 195, 512, 0x08d903b4daedc2e0, 0x0000250d1e15886c }, /* 2.363 */
{ 196, 512, 0xc722a2f7fa7cd686, 0x0000258a99ed0c9e }, /* 2.747 */
{ 197, 512, 0x8f71faf0e54e361d, 0x000025dee11976f5 }, /* 2.531 */
{ 198, 512, 0x87f64695c91a54e7, 0x0000264e00a43da0 }, /* 2.707 */
{ 199, 512, 0xc719cbac2c336b92, 0x000026d327277ac1 }, /* 2.315 */
{ 200, 512, 0xe7e647afaf771ade, 0x000027523a5c44bf }, /* 3.012 */
{ 201, 512, 0x12d4b5c38ce8c946, 0x0000273898432545 }, /* 2.378 */
{ 202, 512, 0xf2e0cd4067bdc94a, 0x000027e47bb2c935 }, /* 2.969 */
{ 203, 512, 0x21b79f14d6d947d3, 0x0000281e64977f0d }, /* 2.594 */
{ 204, 512, 0x515093f952f18cd6, 0x0000289691a473fd }, /* 2.763 */
{ 205, 512, 0xd47b160a1b1022c8, 0x00002903e8b52411 }, /* 2.457 */
{ 206, 512, 0xc02fc96684715a16, 0x0000297515608601 }, /* 3.057 */
{ 207, 512, 0xef51e68efba72ed0, 0x000029ef73604804 }, /* 2.590 */
{ 208, 512, 0x9e3be6e5448b4f33, 0x00002a2846ed074b }, /* 3.047 */
{ 209, 512, 0x81d446c6d5fec063, 0x00002a92ca693455 }, /* 2.676 */
{ 210, 512, 0xff215de8224e57d5, 0x00002b2271fe3729 }, /* 2.993 */
{ 211, 512, 0xe2524d9ba8f69796, 0x00002b64b99c3ba2 }, /* 2.457 */
{ 212, 512, 0xf6b28e26097b7e4b, 0x00002bd768b6e068 }, /* 3.182 */
{ 213, 512, 0x893a487f30ce1644, 0x00002c67f722b4b2 }, /* 2.563 */
{ 214, 512, 0x386566c3fc9871df, 0x00002cc1cf8b4037 }, /* 3.025 */
{ 215, 512, 0x1e0ed78edf1f558a, 0x00002d3948d36c7f }, /* 2.730 */
{ 216, 512, 0xe3bc20c31e61f113, 0x00002d6d6b12e025 }, /* 3.036 */
{ 217, 512, 0xd6c3ad2e23021882, 0x00002deff7572241 }, /* 2.722 */
{ 218, 512, 0xb4a9f95cf0f69c5a, 0x00002e67d537aa36 }, /* 3.356 */
{ 219, 512, 0x6e98ed6f6c38e82f, 0x00002e9720626789 }, /* 2.697 */
{ 220, 512, 0x2e01edba33fddac7, 0x00002f407c6b0198 }, /* 2.979 */
{ 221, 512, 0x559d02e1f5f57ccc, 0x00002fb6a5ab4f24 }, /* 2.858 */
{ 222, 512, 0xac18f5a916adcd8e, 0x0000304ae1c5c57e }, /* 3.258 */
{ 223, 512, 0x15789fbaddb86f4b, 0x0000306f6e019c78 }, /* 2.693 */
{ 224, 512, 0xf4a9c36d5bc4c408, 0x000030da40434213 }, /* 3.259 */
{ 225, 512, 0xf640f90fd2727f44, 0x00003189ed37b90c }, /* 2.733 */
{ 226, 512, 0xb5313d390d61884a, 0x000031e152616b37 }, /* 3.235 */
{ 227, 512, 0x4bae6b3ce9160939, 0x0000321f40aeac42 }, /* 2.983 */
{ 228, 512, 0x838c34480f1a66a1, 0x000032f389c0f78e }, /* 3.308 */
{ 229, 512, 0xb1c4a52c8e3d6060, 0x0000330062a40284 }, /* 2.715 */
{ 230, 512, 0xe0f1110c6d0ed822, 0x0000338be435644f }, /* 3.540 */
{ 231, 512, 0x9f1a8ccdcea68d4b, 0x000034045a4e97e1 }, /* 2.779 */
{ 232, 512, 0x3261ed62223f3099, 0x000034702cfc401c }, /* 3.084 */
{ 233, 512, 0xf2191e2311022d65, 0x00003509dd19c9fc }, /* 2.987 */
{ 234, 512, 0xf102a395c2033abc, 0x000035654dc96fae }, /* 3.341 */
{ 235, 512, 0x11fe378f027906b6, 0x000035b5193b0264 }, /* 2.793 */
{ 236, 512, 0xf777f2c026b337aa, 0x000036704f5d9297 }, /* 3.518 */
{ 237, 512, 0x1b04e9c2ee143f32, 0x000036dfbb7af218 }, /* 2.962 */
{ 238, 512, 0x2fcec95266f9352c, 0x00003785c8df24a9 }, /* 3.196 */
{ 239, 512, 0xfe2b0e47e427dd85, 0x000037cbdf5da729 }, /* 2.914 */
{ 240, 512, 0x72b49bf2225f6c6d, 0x0000382227c15855 }, /* 3.408 */
{ 241, 512, 0x50486b43df7df9c7, 0x0000389b88be6453 }, /* 2.903 */
{ 242, 512, 0x5192a3e53181c8ab, 0x000038ddf3d67263 }, /* 3.778 */
{ 243, 512, 0xe9f5d8365296fd5e, 0x0000399f1c6c9e9c }, /* 3.026 */
{ 244, 512, 0xc740263f0301efa8, 0x00003a147146512d }, /* 3.347 */
{ 245, 512, 0x23cd0f2b5671e67d, 0x00003ab10bcc0d9d }, /* 3.212 */
{ 246, 512, 0x002ccc7e5cd41390, 0x00003ad6cd14a6c0 }, /* 3.482 */
{ 247, 512, 0x9aafb3c02544b31b, 0x00003b8cb8779fb0 }, /* 3.146 */
{ 248, 512, 0x72ba07a78b121999, 0x00003c24142a5a3f }, /* 3.626 */
{ 249, 512, 0x3d784aa58edfc7b4, 0x00003cd084817d99 }, /* 2.952 */
{ 250, 512, 0xaab750424d8004af, 0x00003d506a8e098e }, /* 3.463 */
{ 251, 512, 0x84403fcf8e6b5ca2, 0x00003d4c54c2aec4 }, /* 3.131 */
{ 252, 512, 0x71eb7455ec98e207, 0x00003e655715cf2c }, /* 3.538 */
{ 253, 512, 0xd752b4f19301595b, 0x00003ecd7b2ca5ac }, /* 2.974 */
{ 254, 512, 0xc4674129750499de, 0x00003e99e86d3e95 }, /* 3.843 */
{ 255, 512, 0x9772baff5cd12ef5, 0x00003f895c019841 }, /* 3.088 */
};
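/*
 * For example, the { 14, 256, 0x559b8c44065f8967, 0x00000016ab2ff079 }
 * entry above describes the fixed map for 14 children: 256 permutation
 * rows are generated from the 0x559b8c44065f8967 seed, the expanded map
 * must match the 0x00000016ab2ff079 Fletcher-4 checksum, and the trailing
 * 1.121 comment records its average imbalance ratio.
 */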
/*
* Verify the map is valid. Each device index must appear exactly
* once in every row, and the permutation array checksum must match.
*/
static int
verify_perms(uint8_t *perms, uint64_t children, uint64_t nperms,
uint64_t checksum)
{
int countssz = sizeof (uint16_t) * children;
uint16_t *counts = kmem_zalloc(countssz, KM_SLEEP);
for (int i = 0; i < nperms; i++) {
for (int j = 0; j < children; j++) {
uint8_t val = perms[(i * children) + j];
if (val >= children || counts[val] != i) {
kmem_free(counts, countssz);
return (EINVAL);
}
counts[val]++;
}
}
if (checksum != 0) {
int permssz = sizeof (uint8_t) * children * nperms;
zio_cksum_t cksum;
fletcher_4_native_varsize(perms, permssz, &cksum);
if (checksum != cksum.zc_word[0]) {
kmem_free(counts, countssz);
return (ECKSUM);
}
}
kmem_free(counts, countssz);
return (0);
}
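/*
 * For example, with children = 4 a row of { 2, 0, 3, 1 } passes the
 * verify_perms() check above: every value is below the child count and,
 * when first seen in row i, its counts[] entry still equals i. A row such
 * as { 2, 0, 2, 1 } is rejected because the duplicate 2 is encountered
 * after counts[2] has already been advanced beyond the current row index.
 */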
/*
* Generate the permutation array for the draid_map_t. These maps control
* the placement of all data in a dRAID. Therefore it's critical that the
* seed always generates the same mapping. We provide our own pseudo-random
* number generator for this purpose.
*/
int
vdev_draid_generate_perms(const draid_map_t *map, uint8_t **permsp)
{
VERIFY3U(map->dm_children, >=, VDEV_DRAID_MIN_CHILDREN);
VERIFY3U(map->dm_children, <=, VDEV_DRAID_MAX_CHILDREN);
VERIFY3U(map->dm_seed, !=, 0);
VERIFY3U(map->dm_nperms, !=, 0);
VERIFY3P(map->dm_perms, ==, NULL);
#ifdef _KERNEL
/*
* The kernel code always provides both a map_seed and checksum.
* Only the tests/zfs-tests/cmd/draid/draid.c utility will provide
* a zero checksum when generating new candidate maps.
*/
VERIFY3U(map->dm_checksum, !=, 0);
#endif
uint64_t children = map->dm_children;
uint64_t nperms = map->dm_nperms;
int rowsz = sizeof (uint8_t) * children;
int permssz = rowsz * nperms;
uint8_t *perms;
/* Allocate the permutation array */
perms = vmem_alloc(permssz, KM_SLEEP);
/* Setup an initial row with a known pattern */
uint8_t *initial_row = kmem_alloc(rowsz, KM_SLEEP);
for (int i = 0; i < children; i++)
initial_row[i] = i;
uint64_t draid_seed[2] = { VDEV_DRAID_SEED, map->dm_seed };
uint8_t *current_row, *previous_row = initial_row;
/*
* Perform a Fisher-Yates shuffle of each row using the previous
* row as the starting point. An initial_row with known pattern
* is used as the input for the first row.
*/
for (int i = 0; i < nperms; i++) {
current_row = &perms[i * children];
memcpy(current_row, previous_row, rowsz);
for (int j = children - 1; j > 0; j--) {
uint64_t k = vdev_draid_rand(draid_seed) % (j + 1);
uint8_t val = current_row[j];
current_row[j] = current_row[k];
current_row[k] = val;
}
previous_row = current_row;
}
kmem_free(initial_row, rowsz);
int error = verify_perms(perms, children, nperms, map->dm_checksum);
if (error) {
vmem_free(perms, permssz);
return (error);
}
*permsp = perms;
return (0);
}
/*
* Lookup the fixed draid_map_t for the requested number of children.
*/
int
vdev_draid_lookup_map(uint64_t children, const draid_map_t **mapp)
{
for (int i = 0; i < VDEV_DRAID_MAX_MAPS; i++) {
if (draid_maps[i].dm_children == children) {
*mapp = &draid_maps[i];
return (0);
}
}
return (ENOENT);
}
/*
* Lookup the permutation array and iteration id for the provided offset.
*/
static void
vdev_draid_get_perm(vdev_draid_config_t *vdc, uint64_t pindex,
uint8_t **base, uint64_t *iter)
{
uint64_t ncols = vdc->vdc_children;
uint64_t poff = pindex % (vdc->vdc_nperms * ncols);
*base = vdc->vdc_perms + (poff / ncols) * ncols;
*iter = poff % ncols;
}
static inline uint64_t
vdev_draid_permute_id(vdev_draid_config_t *vdc,
uint8_t *base, uint64_t iter, uint64_t index)
{
return ((base[index] + iter) % vdc->vdc_children);
}
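/*
 * For example, with vdc_children = 14 and vdc_nperms = 256, a permutation
 * index of 300 gives poff = 300 % (256 * 14) = 300, so vdev_draid_get_perm()
 * selects row 300 / 14 = 21 of the permutation array with iter = 300 % 14
 * = 6, and vdev_draid_permute_id() then maps index 0 to physical child
 * (base[0] + 6) % 14.
 */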
/*
* Return the asize which is the psize rounded up to a full group width.
* i.e. vdev_draid_psize_to_asize().
*/
static uint64_t
vdev_draid_asize(vdev_t *vd, uint64_t psize)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_ashift;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t rows = ((psize - 1) / (vdc->vdc_ndata << ashift)) + 1;
uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
ASSERT3U(asize, !=, 0);
ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);
return (asize);
}
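/*
 * For example, with 4 KiB sectors (ashift = 12) and a draid1:8d layout
 * (vdc_ndata = 8, vdc_groupwidth = 9), a 128 KiB psize covers 32 data
 * sectors and therefore 4 rows, so vdev_draid_asize() returns
 * (4 * 9) << 12 = 144 KiB: the psize rounded up to four full 9-wide
 * stripes including parity.
 */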
/*
 * Deflate the asize to the psize; this includes stripping parity.
*/
uint64_t
vdev_draid_asize_to_psize(vdev_t *vd, uint64_t asize)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT0(asize % vdc->vdc_groupwidth);
return ((asize / vdc->vdc_groupwidth) * vdc->vdc_ndata);
}
/*
* Convert a logical offset to the corresponding group number.
*/
static uint64_t
vdev_draid_offset_to_group(vdev_t *vd, uint64_t offset)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (offset / vdc->vdc_groupsz);
}
/*
* Convert a group number to the logical starting offset for that group.
*/
static uint64_t
vdev_draid_group_to_offset(vdev_t *vd, uint64_t group)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (group * vdc->vdc_groupsz);
}
/*
* Full stripe writes. When writing, all columns (D+P) are required. Parity
* is calculated over all the columns, including empty zero filled sectors,
* and each is written to disk. While only the data columns are needed for
* a normal read, all of the columns are required for reconstruction when
* performing a sequential resilver.
*
* For "big columns" it's sufficient to map the correct range of the zio ABD.
* Partial columns require allocating a gang ABD in order to zero fill the
* empty sectors. When the column is empty a zero filled sector must be
* mapped. In all cases the data ABDs must be the same size as the parity
* ABDs (e.g. rc->rc_size == parity_size).
*/
static void
vdev_draid_map_alloc_write(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t abd_off = abd_offset;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(parity_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small write), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
rc->rc_abd = abd_get_zeros(skip_size);
} else if (rc->rc_size == parity_size) {
/* this is a "big column" */
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
} else {
/* short data column, add a skip sector */
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd_get_offset_size(
zio->io_abd, abd_off, rc->rc_size), B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_zeros(skip_size),
B_TRUE);
}
ASSERT3U(abd_get_size(rc->rc_abd), ==, parity_size);
abd_off += rc->rc_size;
rc->rc_size = parity_size;
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
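/*
 * For example, with 8 data + 1 parity columns and 4 KiB sectors, a 20 KiB
 * write supplies 5 data sectors and parity_size is a single sector. The
 * five columns holding data are "big columns" mapped directly from the
 * zio ABD, while the remaining three data columns are empty and are backed
 * by zero filled skip sectors so parity is computed over a full stripe.
 */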
/*
* Scrub/resilver reads. In order to store the contents of the skip sectors
* an additional ABD is allocated. The columns are handled in the same way
* as a full stripe write except instead of using the zero ABD the newly
* allocated skip ABD is used to back the skip sectors. In all cases the
* data ABD must be the same size as the parity ABDs.
*/
static void
vdev_draid_map_alloc_scrub(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t abd_off = abd_offset;
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, ==, NULL);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
B_FALSE);
}
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
} else if (rc->rc_size == parity_size) {
/* this is a "big column" */
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
} else {
/* short data column, add a skip sector */
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, abd_get_offset_size(
zio->io_abd, abd_off, rc->rc_size), B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_offset_size(
rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
skip_off += skip_size;
}
uint64_t abd_size = abd_get_size(rc->rc_abd);
ASSERT3U(abd_size, ==, abd_get_size(rr->rr_col[0].rc_abd));
/*
* Increase rc_size so the skip ABD is included in subsequent
* parity calculations.
*/
abd_off += rc->rc_size;
rc->rc_size = abd_size;
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}
/*
* Normal reads. In this common case only the columns containing data
 * are read into the zio ABDs. Neither the parity columns nor the empty skip
 * sectors are read unless the checksum fails verification, in which case
* vdev_raidz_read_all() will call vdev_draid_map_alloc_empty() to expand
* the raid map in order to allow reconstruction using the parity data and
* skip sectors.
*/
static void
vdev_draid_map_alloc_read(zio_t *zio, uint64_t abd_offset, raidz_row_t *rr)
{
uint64_t abd_off = abd_offset;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size > 0) {
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, abd_off, rc->rc_size);
abd_off += rc->rc_size;
}
}
IMPLY(abd_offset != 0, abd_off == zio->io_size);
}
/*
* Converts a normal "read" raidz_row_t to a "scrub" raidz_row_t. The key
* difference is that an ABD is allocated to back skip sectors so they may
 * be read into memory, verified, and repaired if needed.
*/
void
vdev_draid_map_alloc_empty(zio_t *zio, raidz_row_t *rr)
{
uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
uint64_t parity_size = rr->rr_col[0].rc_size;
uint64_t skip_off = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
ASSERT3P(rr->rr_abd_empty, ==, NULL);
if (rr->rr_nempty > 0) {
rr->rr_abd_empty = abd_alloc_linear(rr->rr_nempty * skip_size,
B_FALSE);
}
for (uint64_t c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0) {
/* empty data column (small read), add a skip sector */
ASSERT3U(skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
ASSERT3P(rc->rc_abd, ==, NULL);
rc->rc_abd = abd_get_offset_size(rr->rr_abd_empty,
skip_off, skip_size);
skip_off += skip_size;
} else if (rc->rc_size == parity_size) {
/* this is a "big column", nothing to add */
ASSERT3P(rc->rc_abd, !=, NULL);
} else {
/*
* short data column, add a skip sector and clear
* rc_tried to force the entire column to be re-read
* thereby including the missing skip sector data
* which is needed for reconstruction.
*/
ASSERT3U(rc->rc_size + skip_size, ==, parity_size);
ASSERT3U(rr->rr_nempty, !=, 0);
ASSERT3P(rc->rc_abd, !=, NULL);
ASSERT(!abd_is_gang(rc->rc_abd));
abd_t *read_abd = rc->rc_abd;
rc->rc_abd = abd_alloc_gang();
abd_gang_add(rc->rc_abd, read_abd, B_TRUE);
abd_gang_add(rc->rc_abd, abd_get_offset_size(
rr->rr_abd_empty, skip_off, skip_size), B_TRUE);
skip_off += skip_size;
rc->rc_tried = 0;
}
/*
* Increase rc_size so the empty ABD is included in subsequent
* parity calculations.
*/
rc->rc_size = parity_size;
}
ASSERT3U(skip_off, ==, rr->rr_nempty * skip_size);
}
+/*
+ * Verify that all empty sectors are zero filled before using them to
+ * calculate parity. Otherwise, silent corruption in an empty sector will
+ * result in bad parity being generated. That bad parity will then be
+ * considered authoritative and overwrite the good parity on disk. This
+ * is possible because the checksum is only calculated over the data,
+ * thus it cannot be used to detect damage in empty sectors.
+ */
+int
+vdev_draid_map_verify_empty(zio_t *zio, raidz_row_t *rr)
+{
+ uint64_t skip_size = 1ULL << zio->io_vd->vdev_top->vdev_ashift;
+ uint64_t parity_size = rr->rr_col[0].rc_size;
+ uint64_t skip_off = parity_size - skip_size;
+ uint64_t empty_off = 0;
+ int ret = 0;
+
+ ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
+ ASSERT3P(rr->rr_abd_empty, !=, NULL);
+ ASSERT3U(rr->rr_bigcols, >, 0);
+
+ void *zero_buf = kmem_zalloc(skip_size, KM_SLEEP);
+
+ for (int c = rr->rr_bigcols; c < rr->rr_cols; c++) {
+ raidz_col_t *rc = &rr->rr_col[c];
+
+ ASSERT3P(rc->rc_abd, !=, NULL);
+ ASSERT3U(rc->rc_size, ==, parity_size);
+
+ if (abd_cmp_buf_off(rc->rc_abd, zero_buf, skip_off,
+ skip_size) != 0) {
+ vdev_raidz_checksum_error(zio, rc, rc->rc_abd);
+ abd_zero_off(rc->rc_abd, skip_off, skip_size);
+ rc->rc_error = SET_ERROR(ECKSUM);
+ ret++;
+ }
+
+ empty_off += skip_size;
+ }
+
+ ASSERT3U(empty_off, ==, abd_get_size(rr->rr_abd_empty));
+
+ kmem_free(zero_buf, skip_size);
+
+ return (ret);
+}
+
/*
* Given a logical address within a dRAID configuration, return the physical
* address on the first drive in the group that this address maps to
* (at position 'start' in permutation number 'perm').
*/
static uint64_t
vdev_draid_logical_to_physical(vdev_t *vd, uint64_t logical_offset,
uint64_t *perm, uint64_t *start)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
/* b is the dRAID (parent) sector offset. */
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t b_offset = logical_offset >> ashift;
/*
* The height of a row in units of the vdev's minimum sector size.
* This is the amount of data written to each disk of each group
* in a given permutation.
*/
uint64_t rowheight_sectors = VDEV_DRAID_ROWHEIGHT >> ashift;
/*
* We cycle through a disk permutation every groupsz * ngroups chunk
* of address space. Note that ngroups * groupsz must be a multiple
* of the number of data drives (ndisks) in order to guarantee
* alignment. So, for example, if our row height is 16MB, our group
* size is 10, and there are 13 data drives in the draid, then ngroups
* will be 13, we will change permutation every 2.08GB and each
* disk will have 160MB of data per chunk.
*/
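/*
 * Spelling out the example above: groupsz = groupwidth * rowheight =
 * 10 * 16MB = 160MB, and with 13 drives ngroups must be 13 so that
 * groupwidth * ngroups (130) is a multiple of ndisks. The permutation
 * therefore changes every ngroups * groupsz = 13 * 160MB = 2080MB
 * (~2.08GB), which spread over 13 drives is 160MB per disk per chunk.
 */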
uint64_t groupwidth = vdc->vdc_groupwidth;
uint64_t ngroups = vdc->vdc_ngroups;
uint64_t ndisks = vdc->vdc_ndisks;
/*
* groupstart is where the group this IO will land in "starts" in
* the permutation array.
*/
uint64_t group = logical_offset / vdc->vdc_groupsz;
uint64_t groupstart = (group * groupwidth) % ndisks;
ASSERT3U(groupstart + groupwidth, <=, ndisks + groupstart);
*start = groupstart;
/* b_offset is the sector offset within a group chunk */
b_offset = b_offset % (rowheight_sectors * groupwidth);
ASSERT0(b_offset % groupwidth);
/*
* Find the starting byte offset on each child vdev:
* - within a permutation there are ngroups groups spread over the
* rows, where each row covers a slice portion of the disk
* - each permutation has (groupwidth * ngroups) / ndisks rows
* - so each permutation covers rows * slice portion of the disk
* - so we need to find the row where this IO group target begins
*/
*perm = group / ngroups;
uint64_t row = (*perm * ((groupwidth * ngroups) / ndisks)) +
(((group % ngroups) * groupwidth) / ndisks);
return (((rowheight_sectors * row) +
(b_offset / groupwidth)) << ashift);
}
static uint64_t
vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,
uint64_t abd_offset, uint64_t abd_size)
{
vdev_t *vd = zio->io_vd;
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t io_size = abd_size;
uint64_t io_asize = vdev_draid_asize(vd, io_size);
uint64_t group = vdev_draid_offset_to_group(vd, io_offset);
uint64_t start_offset = vdev_draid_group_to_offset(vd, group + 1);
/*
* Limit the io_size to the space remaining in the group. A second
* row in the raidz_map_t is created for the remainder.
*/
if (io_offset + io_asize > start_offset) {
io_size = vdev_draid_asize_to_psize(vd,
start_offset - io_offset);
}
/*
* A block may span at most the logical end of one group and the start
* of the next group. Therefore, at the end of a group the io_size must
* span the group width evenly and the remainder must be aligned to the
* start of the next group.
*/
IMPLY(abd_offset == 0 && io_size < zio->io_size,
(io_asize >> ashift) % vdc->vdc_groupwidth == 0);
IMPLY(abd_offset != 0,
vdev_draid_group_to_offset(vd, group) == io_offset);
/* Lookup starting byte offset on each child vdev */
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
io_offset, &perm, &groupstart);
/*
* If there are fewer than groupwidth drives available after the group
* start, the group is going to wrap onto the next row. 'wrap' is the
* group disk number that starts on the next row.
*/
uint64_t ndisks = vdc->vdc_ndisks;
uint64_t groupwidth = vdc->vdc_groupwidth;
uint64_t wrap = groupwidth;
if (groupstart + groupwidth > ndisks)
wrap = ndisks - groupstart;
/* The io size in units of the vdev's minimum sector size. */
const uint64_t psize = io_size >> ashift;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
uint64_t q = psize / vdc->vdc_ndata;
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
uint64_t r = psize - q * vdc->vdc_ndata;
/* The number of "big columns" - those which contain remainder data. */
uint64_t bc = (r == 0 ? 0 : r + vdc->vdc_nparity);
ASSERT3U(bc, <, groupwidth);
/* The total number of data and parity sectors for this I/O. */
uint64_t tot = psize + (vdc->vdc_nparity * (q + (r == 0 ? 0 : 1)));
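/*
 * Worked example (hypothetical draid2:8d geometry, ashift = 12 so 4 KiB
 * sectors, groupwidth = 10): a 144 KiB block gives psize = 36 sectors,
 * q = 36 / 8 = 4, r = 36 - 4 * 8 = 4, bc = 4 + 2 = 6 and
 * tot = 36 + 2 * (4 + 1) = 46 sectors. The row is later padded with
 * rr_nempty = roundup(46, 10) - 46 = 4 skip sectors, matching
 * groupwidth - bc.
 */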
raidz_row_t *rr;
rr = kmem_alloc(offsetof(raidz_row_t, rr_col[groupwidth]), KM_SLEEP);
rr->rr_cols = groupwidth;
rr->rr_scols = groupwidth;
rr->rr_bigcols = bc;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
rr->rr_firstdatacol = vdc->vdc_nparity;
rr->rr_abd_empty = NULL;
#ifdef ZFS_DEBUG
rr->rr_offset = io_offset;
rr->rr_size = io_size;
#endif
*rrp = rr;
uint8_t *base;
uint64_t iter, asize = 0;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < groupwidth; i++) {
raidz_col_t *rc = &rr->rr_col[i];
uint64_t c = (groupstart + i) % ndisks;
/* increment the offset if we wrap to the next row */
if (i == wrap)
physical_offset += VDEV_DRAID_ROWHEIGHT;
rc->rc_devidx = vdev_draid_permute_id(vdc, base, iter, c);
rc->rc_offset = physical_offset;
rc->rc_abd = NULL;
rc->rc_orig_data = NULL;
rc->rc_error = 0;
rc->rc_tried = 0;
rc->rc_skipped = 0;
rc->rc_force_repair = 0;
rc->rc_allow_repair = 1;
rc->rc_need_orig_restore = B_FALSE;
if (q == 0 && i >= bc)
rc->rc_size = 0;
else if (i < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rr->rr_nempty = roundup(tot, groupwidth) - tot;
IMPLY(bc > 0, rr->rr_nempty == groupwidth - bc);
/* Allocate buffers for the parity columns */
for (uint64_t c = 0; c < rr->rr_firstdatacol; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_alloc_linear(rc->rc_size, B_FALSE);
}
/*
* Map buffers for data columns and allocate/map buffers for skip
* sectors. There are three distinct cases for dRAID which are
* required to support sequential rebuild.
*/
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_draid_map_alloc_write(zio, abd_offset, rr);
} else if ((rr->rr_nempty > 0) &&
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
vdev_draid_map_alloc_scrub(zio, abd_offset, rr);
} else {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
vdev_draid_map_alloc_read(zio, abd_offset, rr);
}
return (io_size);
}
/*
* Allocate the raidz mapping to be applied to the dRAID I/O. The parity
* calculations for dRAID are identical to raidz however there are a few
* differences in the layout.
*
* - dRAID always allocates a full stripe width. Any extra sectors due to
* this padding are zero filled and written to disk. They will be read
* back during a scrub or repair operation since they are included in
* the parity calculation. This property enables sequential resilvering.
*
* - When the block at the logical offset spans redundancy groups then two
* rows are allocated in the raidz_map_t. One row resides at the end of
* the first group and the other at the start of the following group.
*/
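/*
 * For example (hypothetical draid2:8d geometry at ashift = 12): even a
 * single 4 KiB block allocates a full stripe of 10 sectors, i.e. one
 * data sector, seven zero-filled skip sectors and two parity sectors,
 * 40 KiB of asize in total. The padding is written and covered by
 * parity, so a scrub or sequential resilver can read it back like any
 * other sector, which is the property that enables sequential rebuild.
 */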
static raidz_map_t *
vdev_draid_map_alloc(zio_t *zio)
{
raidz_row_t *rr[2];
uint64_t abd_offset = 0;
uint64_t abd_size = zio->io_size;
uint64_t io_offset = zio->io_offset;
uint64_t size;
int nrows = 1;
size = vdev_draid_map_alloc_row(zio, &rr[0], io_offset,
abd_offset, abd_size);
if (size < abd_size) {
vdev_t *vd = zio->io_vd;
io_offset += vdev_draid_asize(vd, size);
abd_offset += size;
abd_size -= size;
nrows++;
ASSERT3U(io_offset, ==, vdev_draid_group_to_offset(
vd, vdev_draid_offset_to_group(vd, io_offset)));
ASSERT3U(abd_offset, <, zio->io_size);
ASSERT3U(abd_size, !=, 0);
size = vdev_draid_map_alloc_row(zio, &rr[1],
io_offset, abd_offset, abd_size);
VERIFY3U(size, ==, abd_size);
}
raidz_map_t *rm;
rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[nrows]), KM_SLEEP);
rm->rm_ops = vdev_raidz_math_get_ops();
rm->rm_nrows = nrows;
rm->rm_row[0] = rr[0];
if (nrows == 2)
rm->rm_row[1] = rr[1];
return (rm);
}
/*
* Given an offset into a dRAID return the next group width aligned offset
* which can be used to start an allocation.
*/
static uint64_t
vdev_draid_get_astart(vdev_t *vd, const uint64_t start)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (roundup(start, vdc->vdc_groupwidth << vd->vdev_ashift));
}
/*
* Allocatable space for dRAID is (children - nspares) * sizeof(smallest child)
* rounded down to the last full slice. So each child must provide at least
* 1 / (children - nspares) of its asize.
*/
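/*
 * Rough arithmetic (hypothetical numbers): for a dRAID with
 * vdc_ndisks = 12 non-spare children and a top-level vdev_min_asize of
 * 1200 GiB, each child must supply at least 1200 GiB / 12 = 100 GiB
 * plus VDEV_DRAID_REFLOW_RESERVE, the scratch area held back at the
 * end of every child for the expansion feature.
 */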
static uint64_t
vdev_draid_min_asize(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (VDEV_DRAID_REFLOW_RESERVE +
(vd->vdev_min_asize + vdc->vdc_ndisks - 1) / (vdc->vdc_ndisks));
}
/*
* When using dRAID the minimum allocation size is determined by the number
* of data disks in the redundancy group. Full stripes are always used.
*/
static uint64_t
vdev_draid_min_alloc(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
return (vdc->vdc_ndata << vd->vdev_ashift);
}
/*
* Returns true if the txg range does not exist on any leaf vdev.
*
* A dRAID spare does not fit into the DTL model. While it has child vdevs
* there is no redundancy among them, and the effective child vdev is
* determined by offset. Essentially we do a vdev_dtl_reassess() on the
* fly by replacing a dRAID spare with the child vdev under the offset.
* Note that it is a recursive process because the child vdev can be
* another dRAID spare and so on.
*/
boolean_t
vdev_draid_missing(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
uint64_t size)
{
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
/*
* Check all of the readable children; if any child
* contains the txg range then the data is not missing.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (!vdev_draid_missing(cvd, physical_offset,
txg, size))
return (B_FALSE);
}
return (B_TRUE);
}
if (vd->vdev_ops == &vdev_draid_spare_ops) {
/*
* When sequentially resilvering we don't have a proper
* txg range so instead we must presume all txgs are
* missing on this vdev until the resilver completes.
*/
if (vd->vdev_rebuild_txg != 0)
return (B_TRUE);
/*
* DTL_MISSING is set for all prior txgs when a resilver
* is started in spa_vdev_attach().
*/
if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
return (B_TRUE);
/*
* Consult the DTL on the relevant vdev. Either a leaf
* vdev or a spare/replacing mirror child may be returned,
* so we must recursively call vdev_draid_missing().
*/
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_TRUE);
return (vdev_draid_missing(vd, physical_offset,
txg, size));
}
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Returns true if the txg is only partially replicated on the leaf vdevs.
*/
static boolean_t
vdev_draid_partial(vdev_t *vd, uint64_t physical_offset, uint64_t txg,
uint64_t size)
{
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
/*
* Check all of the readable children; if any child is
* missing the txg range then it is partially replicated.
*/
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (vdev_draid_partial(cvd, physical_offset, txg, size))
return (B_TRUE);
}
return (B_FALSE);
}
if (vd->vdev_ops == &vdev_draid_spare_ops) {
/*
* When sequentially resilvering we don't have a proper
* txg range so instead we must presume all txgs are
* missing on this vdev until the resilver completes.
*/
if (vd->vdev_rebuild_txg != 0)
return (B_TRUE);
/*
* DTL_MISSING is set for all prior txgs when a resilver
* is started in spa_vdev_attach().
*/
if (vdev_dtl_contains(vd, DTL_MISSING, txg, size))
return (B_TRUE);
/*
* Consult the DTL on the relevant vdev. Either a leaf
* vdev or a spare/replacing mirror child may be returned,
* so we must recursively call vdev_draid_partial().
*/
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_TRUE);
return (vdev_draid_partial(vd, physical_offset, txg, size));
}
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Determine if the vdev is readable at the given offset.
*/
boolean_t
vdev_draid_readable(vdev_t *vd, uint64_t physical_offset)
{
if (vd->vdev_ops == &vdev_draid_spare_ops) {
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_FALSE);
}
if (vd->vdev_ops == &vdev_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops) {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (!vdev_readable(cvd))
continue;
if (vdev_draid_readable(cvd, physical_offset))
return (B_TRUE);
}
return (B_FALSE);
}
return (vdev_readable(vd));
}
/*
* Returns the first distributed spare found under the provided vdev tree.
*/
static vdev_t *
vdev_draid_find_spare(vdev_t *vd)
{
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *svd = vdev_draid_find_spare(vd->vdev_child[c]);
if (svd != NULL)
return (svd);
}
return (NULL);
}
/*
* Returns B_TRUE if the passed in vdev is currently "faulted".
* Faulted, in this context, means that the vdev represents a
* replacing or sparing vdev tree.
*/
static boolean_t
vdev_draid_faulted(vdev_t *vd, uint64_t physical_offset)
{
if (vd->vdev_ops == &vdev_draid_spare_ops) {
vd = vdev_draid_spare_get_child(vd, physical_offset);
if (vd == NULL)
return (B_FALSE);
/*
* After resolving the distributed spare to a leaf vdev
* check the parent to determine if it's "faulted".
*/
vd = vd->vdev_parent;
}
return (vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
}
/*
* Determine if the dRAID block at the logical offset is degraded.
* Used by sequential resilver.
*/
static boolean_t
vdev_draid_group_degraded(vdev_t *vd, uint64_t offset)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
offset, &perm, &groupstart);
uint8_t *base;
uint64_t iter;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
vdev_t *cvd = vd->vdev_child[cid];
/* Group contains a faulted vdev. */
if (vdev_draid_faulted(cvd, physical_offset))
return (B_TRUE);
/*
* Always check groups with active distributed spares
* because any vdev failure in the pool will affect them.
*/
if (vdev_draid_find_spare(cvd) != NULL)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Determine if the txg is missing. Used by healing resilver.
*/
static boolean_t
vdev_draid_group_missing(vdev_t *vd, uint64_t offset, uint64_t txg,
uint64_t size)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vdev_draid_get_astart(vd, offset), ==, offset);
uint64_t groupstart, perm;
uint64_t physical_offset = vdev_draid_logical_to_physical(vd,
offset, &perm, &groupstart);
uint8_t *base;
uint64_t iter;
vdev_draid_get_perm(vdc, perm, &base, &iter);
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
uint64_t cid = vdev_draid_permute_id(vdc, base, iter, c);
vdev_t *cvd = vd->vdev_child[cid];
/* Transaction group is known to be partially replicated. */
if (vdev_draid_partial(cvd, physical_offset, txg, size))
return (B_TRUE);
/*
* Always check groups with active distributed spares
* because any vdev failure in the pool will affect them.
*/
if (vdev_draid_find_spare(cvd) != NULL)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Find the smallest child asize and largest sector size to calculate the
* available capacity. Distributed spares are ignored since their capacity
* is also based on the minimum child size in the top-level dRAID.
*/
static void
vdev_draid_calculate_asize(vdev_t *vd, uint64_t *asizep, uint64_t *max_asizep,
uint64_t *logical_ashiftp, uint64_t *physical_ashiftp)
{
uint64_t logical_ashift = 0, physical_ashift = 0;
uint64_t asize = 0, max_asize = 0;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
continue;
asize = MIN(asize - 1, cvd->vdev_asize - 1) + 1;
max_asize = MIN(max_asize - 1, cvd->vdev_max_asize - 1) + 1;
logical_ashift = MAX(logical_ashift, cvd->vdev_ashift);
physical_ashift = MAX(physical_ashift,
cvd->vdev_physical_ashift);
}
*asizep = asize;
*max_asizep = max_asize;
*logical_ashiftp = logical_ashift;
*physical_ashiftp = physical_ashift;
}
/*
* Open spare vdevs.
*/
static boolean_t
vdev_draid_open_spares(vdev_t *vd)
{
return (vd->vdev_ops == &vdev_draid_spare_ops ||
vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops);
}
/*
* Open all children, excluding spares.
*/
static boolean_t
vdev_draid_open_children(vdev_t *vd)
{
return (!vdev_draid_open_spares(vd));
}
/*
* Open a top-level dRAID vdev.
*/
static int
vdev_draid_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
uint64_t nparity = vdc->vdc_nparity;
int open_errors = 0;
if (nparity > VDEV_DRAID_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* First open the normal children then the distributed spares. This
* ordering is important to ensure the distributed spares calculate
* the correct psize in the event that the dRAID vdevs were expanded.
*/
vdev_open_children_subset(vd, vdev_draid_open_children);
vdev_open_children_subset(vd, vdev_draid_open_spares);
/* Verify enough of the children are available to continue. */
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_open_error != 0) {
if ((++open_errors) > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (SET_ERROR(ENXIO));
}
}
}
/*
* Allocatable capacity is the sum of the space on all children, less
* the distributed spares, rounded down to the last full row and then
* to the last full group. An additional 32MB of scratch
* space is reserved at the end of each child for use by the dRAID
* expansion feature.
*/
uint64_t child_asize, child_max_asize;
vdev_draid_calculate_asize(vd, &child_asize, &child_max_asize,
logical_ashift, physical_ashift);
/*
* Should be unreachable since the minimum child size is 64MB, but
* we want to make sure an underflow absolutely cannot occur here.
*/
if (child_asize < VDEV_DRAID_REFLOW_RESERVE ||
child_max_asize < VDEV_DRAID_REFLOW_RESERVE) {
return (SET_ERROR(ENXIO));
}
child_asize = ((child_asize - VDEV_DRAID_REFLOW_RESERVE) /
VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
child_max_asize = ((child_max_asize - VDEV_DRAID_REFLOW_RESERVE) /
VDEV_DRAID_ROWHEIGHT) * VDEV_DRAID_ROWHEIGHT;
*asize = (((child_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
vdc->vdc_groupsz);
*max_asize = (((child_max_asize * vdc->vdc_ndisks) / vdc->vdc_groupsz) *
vdc->vdc_groupsz);
return (0);
}
/*
* Close a top-level dRAID vdev.
*/
static void
vdev_draid_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
/*
* Return the maximum asize for a rebuild zio in the provided range
* given the following constraints. A dRAID chunk may not:
*
* - Exceed the maximum allowed block size (SPA_MAXBLOCKSIZE), or
* - Span dRAID redundancy groups.
*/
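/*
 * Worked example (hypothetical values: max_segment = 1 MiB, ndata = 8,
 * ashift = 12): psize = MIN(P2ROUNDUP(1 MiB * 8, 4 KiB),
 * SPA_MAXBLOCKSIZE) = 8 MiB, which is already a multiple of the 8 data
 * columns (1 MiB, or 256 sectors, per column). The returned chunk is
 * then the asize of that psize, further capped by the caller-supplied
 * asize and by the space left before the next redundancy group.
 */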
static uint64_t
vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t max_segment)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t ashift = vd->vdev_ashift;
uint64_t ndata = vdc->vdc_ndata;
uint64_t psize = MIN(P2ROUNDUP(max_segment * ndata, 1 << ashift),
SPA_MAXBLOCKSIZE);
ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);
/* Chunks must evenly span all data columns in the group. */
psize = (((psize >> ashift) / ndata) * ndata) << ashift;
uint64_t chunk_size = MIN(asize, vdev_psize_to_asize(vd, psize));
/* Reduce the chunk size to the group space remaining. */
uint64_t group = vdev_draid_offset_to_group(vd, start);
uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
chunk_size = MIN(chunk_size, left);
ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
vdev_draid_offset_to_group(vd, start + chunk_size - 1));
return (chunk_size);
}
/*
* Align the start of the metaslab to the group width and slightly reduce
* its size to a multiple of the group width. Since full stripe writes are
* required by dRAID this space is unallocable. Furthermore, aligning the
* metaslab start is important for vdev initialize and TRIM which both operate
* on metaslab boundaries which vdev_xlate() expects to be aligned.
*/
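/*
 * For example (hypothetical groupwidth = 10 at ashift = 12, so
 * sz = 40 KiB): a metaslab nominally starting at 100 KiB is moved up
 * to astart = 120 KiB, and its size is trimmed to the largest multiple
 * of 40 KiB that still fits after the shift, so both the start and the
 * size end up group-width aligned.
 */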
static void
vdev_draid_metaslab_init(vdev_t *vd, uint64_t *ms_start, uint64_t *ms_size)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
uint64_t sz = vdc->vdc_groupwidth << vd->vdev_ashift;
uint64_t astart = vdev_draid_get_astart(vd, *ms_start);
uint64_t asize = ((*ms_size - (astart - *ms_start)) / sz) * sz;
*ms_start = astart;
*ms_size = asize;
ASSERT0(*ms_start % sz);
ASSERT0(*ms_size % sz);
}
/*
* Add virtual dRAID spares to the list of valid spares. In order to accomplish
* this the existing array must be freed and reallocated with the additional
* entries.
*/
int
vdev_draid_spare_create(nvlist_t *nvroot, vdev_t *vd, uint64_t *ndraidp,
uint64_t next_vdev_id)
{
uint64_t draid_nspares = 0;
uint64_t ndraid = 0;
int error;
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_t *cvd = vd->vdev_child[i];
if (cvd->vdev_ops == &vdev_draid_ops) {
vdev_draid_config_t *vdc = cvd->vdev_tsd;
draid_nspares += vdc->vdc_nspares;
ndraid++;
}
}
if (draid_nspares == 0) {
*ndraidp = ndraid;
return (0);
}
nvlist_t **old_spares, **new_spares;
uint_t old_nspares;
error = nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&old_spares, &old_nspares);
if (error)
old_nspares = 0;
/* Allocate memory and copy the existing spares. */
new_spares = kmem_alloc(sizeof (nvlist_t *) *
(draid_nspares + old_nspares), KM_SLEEP);
for (uint_t i = 0; i < old_nspares; i++)
new_spares[i] = fnvlist_dup(old_spares[i]);
/* Add new distributed spares to ZPOOL_CONFIG_SPARES. */
uint64_t n = old_nspares;
for (uint64_t vdev_id = 0; vdev_id < vd->vdev_children; vdev_id++) {
vdev_t *cvd = vd->vdev_child[vdev_id];
char path[64];
if (cvd->vdev_ops != &vdev_draid_ops)
continue;
vdev_draid_config_t *vdc = cvd->vdev_tsd;
uint64_t nspares = vdc->vdc_nspares;
uint64_t nparity = vdc->vdc_nparity;
for (uint64_t spare_id = 0; spare_id < nspares; spare_id++) {
bzero(path, sizeof (path));
(void) snprintf(path, sizeof (path) - 1,
"%s%llu-%llu-%llu", VDEV_TYPE_DRAID,
(u_longlong_t)nparity,
(u_longlong_t)next_vdev_id + vdev_id,
(u_longlong_t)spare_id);
nvlist_t *spare = fnvlist_alloc();
fnvlist_add_string(spare, ZPOOL_CONFIG_PATH, path);
fnvlist_add_string(spare, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_DRAID_SPARE);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_TOP_GUID,
cvd->vdev_guid);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_SPARE_ID,
spare_id);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_LOG, 0);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_IS_SPARE, 1);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_WHOLE_DISK, 1);
fnvlist_add_uint64(spare, ZPOOL_CONFIG_ASHIFT,
cvd->vdev_ashift);
new_spares[n] = spare;
n++;
}
}
if (n > 0) {
(void) nvlist_remove_all(nvroot, ZPOOL_CONFIG_SPARES);
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
new_spares, n);
}
for (int i = 0; i < n; i++)
nvlist_free(new_spares[i]);
kmem_free(new_spares, sizeof (*new_spares) * n);
*ndraidp = ndraid;
return (0);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered.
*/
static boolean_t
vdev_draid_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = vdev_draid_asize(vd, psize);
if (phys_birth == TXG_UNKNOWN) {
/*
* Sequential resilver. There is no meaningful phys_birth
* for this block; we can only determine whether the block
* resides in a degraded group, in which case it must be
* resilvered.
*/
ASSERT3U(vdev_draid_offset_to_group(vd, offset), ==,
vdev_draid_offset_to_group(vd, offset + asize - 1));
return (vdev_draid_group_degraded(vd, offset));
} else {
/*
* Healing resilver. TXGs not in DTL_PARTIAL are intact,
* as are blocks in non-degraded groups.
*/
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (vdev_draid_group_missing(vd, offset, phys_birth, 1))
return (B_TRUE);
/* The block may span groups in which case check both. */
if (vdev_draid_offset_to_group(vd, offset) !=
vdev_draid_offset_to_group(vd, offset + asize - 1)) {
if (vdev_draid_group_missing(vd,
offset + asize, phys_birth, 1))
return (B_TRUE);
}
return (B_FALSE);
}
}
static boolean_t
vdev_draid_rebuilding(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (vdev_draid_rebuilding(vd->vdev_child[i])) {
return (B_TRUE);
}
}
return (B_FALSE);
}
static void
vdev_draid_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_draid_asize(vd, rr->rr_size);
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
ASSERT3U(rc->rc_offset + rc->rc_size, ==, physical_rs.rs_end);
#endif
}
/*
* For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity. A gang ABD is allocated by vdev_draid_map_alloc()
* if a skip sector needs to be added to a column.
*/
static void
vdev_draid_io_start_write(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
/*
* Empty columns are zero filled and included in the parity
* calculation and therefore must be written.
*/
ASSERT3U(rc->rc_size, !=, 0);
/* Verify physical to logical translation */
vdev_draid_io_verify(vd, rr, c);
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx], rc->rc_offset,
rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
0, vdev_raidz_child_done, rc));
}
}
/*
* For read operations:
* 1. The vdev_draid_map_alloc() function will create a minimal raidz
* mapping for the read based on the zio->io_flags. There are two
* possible mappings: 1) a normal read, or 2) a scrub/resilver.
* 2. Create the zio read operations. This will include all parity
* columns and skip sectors for a scrub/resilver.
*/
static void
vdev_draid_io_start_read(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
/* Sequential rebuild must do IO at redundancy group boundary. */
IMPLY(zio->io_priority == ZIO_PRIORITY_REBUILD, rr->rr_nempty == 0);
/*
* Iterate over the columns in reverse order so that we hit the parity
* last. Any errors along the way will force us to read the parity.
* For scrub/resilver IOs which verify skip sectors, a gang ABD will
* have been allocated to store them and rc->rc_size is increased.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_draid_readable(cvd, rc->rc_offset)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1;
rc->rc_skipped = 1;
continue;
}
if (vdev_draid_missing(cvd, rc->rc_offset, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
/*
* Empty columns may be read during vdev_draid_io_done().
* Only skip them after the readable and missing checks
* verify they are available.
*/
if (rc->rc_size == 0) {
rc->rc_skipped = 1;
continue;
}
if (zio->io_flags & ZIO_FLAG_RESILVER) {
vdev_t *svd;
/*
* Sequential rebuilds need to always consider the data
* on the child being rebuilt to be stale. This is
* important when all columns are available, to aid
* reconstruction in identifying which columns
* contain incorrect data.
*
* Furthermore, all repairs need to be constrained to
* the devices being rebuilt because without a checksum
* we cannot verify the data is actually correct and
* performing an incorrect repair could result in
* locking in damage and making the data unrecoverable.
*/
if (zio->io_priority == ZIO_PRIORITY_REBUILD) {
if (vdev_draid_rebuilding(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
rc->rc_allow_repair = 1;
continue;
} else {
rc->rc_allow_repair = 0;
}
} else {
rc->rc_allow_repair = 1;
}
/*
* If this child is a distributed spare then the
* offset might reside on the vdev being replaced,
* in which case this data must be written to the
* new device. Failure to do so would result in
* checksum errors when the old device is detached
* and the pool is scrubbed.
*/
if ((svd = vdev_draid_find_spare(cvd)) != NULL) {
svd = vdev_draid_spare_get_child(svd,
rc->rc_offset);
if (svd && (svd->vdev_ops == &vdev_spare_ops ||
svd->vdev_ops == &vdev_replacing_ops)) {
rc->rc_force_repair = 1;
if (vdev_draid_rebuilding(svd))
rc->rc_allow_repair = 1;
}
}
/*
* Always issue a repair IO to this child when it is
* a spare or replacing vdev with an active rebuild.
*/
if ((cvd->vdev_ops == &vdev_spare_ops ||
cvd->vdev_ops == &vdev_replacing_ops) &&
vdev_draid_rebuilding(cvd)) {
rc->rc_force_repair = 1;
rc->rc_allow_repair = 1;
}
}
}
/*
* Either a parity or data column is missing, which means a repair
* may be attempted by vdev_draid_io_done(). Expand the raid map
* to read in empty columns which are needed along with the parity
* during reconstruction.
*/
if ((rr->rr_missingdata > 0 || rr->rr_missingparity > 0) &&
rr->rr_nempty > 0 && rr->rr_abd_empty == NULL) {
vdev_draid_map_alloc_empty(zio, rr);
}
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_error || rc->rc_size == 0)
continue;
if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
/*
* Start an IO operation to a dRAID vdev.
*/
static void
vdev_draid_io_start(zio_t *zio)
{
vdev_t *vd __maybe_unused = zio->io_vd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(zio->io_offset, ==, vdev_draid_get_astart(vd, zio->io_offset));
raidz_map_t *rm = vdev_draid_map_alloc(zio);
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_draid_io_start_write(zio, rm->rm_row[i]);
}
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_draid_io_start_read(zio, rm->rm_row[i]);
}
}
zio_execute(zio);
}
/*
* Complete an IO operation on a dRAID vdev. The raidz logic can be applied
* to dRAID since the layout is fully described by the raidz_map_t.
*/
static void
vdev_draid_io_done(zio_t *zio)
{
vdev_raidz_io_done(zio);
}
static void
vdev_draid_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
ASSERT(vd->vdev_ops == &vdev_draid_ops);
if (faulted > vdc->vdc_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
static void
vdev_draid_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_draid_ops);
vdev_draid_config_t *vdc = raidvd->vdev_tsd;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* Make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t logical_start = logical_rs->rs_start;
uint64_t logical_end = logical_rs->rs_end;
/*
* Unaligned ranges must be skipped. All metaslabs are correctly
* aligned so this should not happen, but this case is handled in
* case it's needed by future callers.
*/
uint64_t astart = vdev_draid_get_astart(raidvd, logical_start);
if (astart != logical_start) {
physical_rs->rs_start = logical_start;
physical_rs->rs_end = logical_start;
remain_rs->rs_start = MIN(astart, logical_end);
remain_rs->rs_end = logical_end;
return;
}
/*
* Unlike with mirrors and raidz a dRAID logical range can map
* to multiple non-contiguous physical ranges. This is handled by
* limiting the size of the logical range to a single group and
* setting the remain argument such that it describes the remaining
* unmapped logical range. This is stricter than absolutely
* necessary but helps simplify the logic below.
*/
uint64_t group = vdev_draid_offset_to_group(raidvd, logical_start);
uint64_t nextstart = vdev_draid_group_to_offset(raidvd, group + 1);
if (logical_end > nextstart)
logical_end = nextstart;
/* Find the starting offset for each vdev in the group */
uint64_t perm, groupstart;
uint64_t start = vdev_draid_logical_to_physical(raidvd,
logical_start, &perm, &groupstart);
uint64_t end = start;
uint8_t *base;
uint64_t iter, id;
vdev_draid_get_perm(vdc, perm, &base, &iter);
/*
* Check if the passed child falls within the group. If it does,
* update the start and end to reflect the physical range.
* Otherwise, leave them unmodified which will result in an empty
* (zero-length) physical range being returned.
*/
for (uint64_t i = 0; i < vdc->vdc_groupwidth; i++) {
uint64_t c = (groupstart + i) % vdc->vdc_ndisks;
if (c == 0 && i != 0) {
/* the group wrapped, increment the start */
start += VDEV_DRAID_ROWHEIGHT;
end = start;
}
id = vdev_draid_permute_id(vdc, base, iter, c);
if (id == cvd->vdev_id) {
uint64_t b_size = (logical_end >> ashift) -
(logical_start >> ashift);
ASSERT3U(b_size, >, 0);
end = start + ((((b_size - 1) /
vdc->vdc_groupwidth) + 1) << ashift);
break;
}
}
physical_rs->rs_start = start;
physical_rs->rs_end = end;
/*
* Only top-level vdevs are allowed to set remain_rs because
* when .vdev_op_xlate() is called for their children the full
* logical range is not provided by vdev_xlate().
*/
remain_rs->rs_start = logical_end;
remain_rs->rs_end = logical_rs->rs_end;
ASSERT3U(physical_rs->rs_start, <=, logical_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_end - logical_start);
}
/*
* Add dRAID specific fields to the config nvlist.
*/
static void
vdev_draid_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_ops);
vdev_draid_config_t *vdc = vd->vdev_tsd;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdc->vdc_nparity);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, vdc->vdc_ndata);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, vdc->vdc_nspares);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, vdc->vdc_ngroups);
}
/*
* Initialize private dRAID specific fields from the nvlist.
*/
static int
vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
+ (void) spa;
uint64_t ndata, nparity, nspares, ngroups;
int error;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA, &ndata))
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) ||
nparity == 0 || nparity > VDEV_DRAID_MAXPARITY) {
return (SET_ERROR(EINVAL));
}
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children == 0 ||
children > VDEV_DRAID_MAX_CHILDREN) {
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NSPARES, &nspares) ||
nspares > 100 || nspares > (children - (ndata + nparity))) {
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NGROUPS, &ngroups) ||
ngroups == 0 || ngroups > VDEV_DRAID_MAX_CHILDREN) {
return (SET_ERROR(EINVAL));
}
/*
* Validate the minimum number of children exist per group for the
* specified parity level (draid1 >= 2, draid2 >= 3, draid3 >= 4).
*/
if (children < (ndata + nparity + nspares))
return (SET_ERROR(EINVAL));
/*
* Create the dRAID configuration using the pool nvlist configuration
* and the fixed mapping for the correct number of children.
*/
vdev_draid_config_t *vdc;
const draid_map_t *map;
error = vdev_draid_lookup_map(children, &map);
if (error)
return (SET_ERROR(EINVAL));
vdc = kmem_zalloc(sizeof (*vdc), KM_SLEEP);
vdc->vdc_ndata = ndata;
vdc->vdc_nparity = nparity;
vdc->vdc_nspares = nspares;
vdc->vdc_children = children;
vdc->vdc_ngroups = ngroups;
vdc->vdc_nperms = map->dm_nperms;
error = vdev_draid_generate_perms(map, &vdc->vdc_perms);
if (error) {
kmem_free(vdc, sizeof (*vdc));
return (SET_ERROR(EINVAL));
}
/*
* Derived constants.
*/
vdc->vdc_groupwidth = vdc->vdc_ndata + vdc->vdc_nparity;
vdc->vdc_ndisks = vdc->vdc_children - vdc->vdc_nspares;
vdc->vdc_groupsz = vdc->vdc_groupwidth * VDEV_DRAID_ROWHEIGHT;
vdc->vdc_devslicesz = (vdc->vdc_groupsz * vdc->vdc_ngroups) /
vdc->vdc_ndisks;
ASSERT3U(vdc->vdc_groupwidth, >=, 2);
ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
vdc->vdc_ndisks, ==, 0);
*tsd = vdc;
return (0);
}
static void
vdev_draid_fini(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
vmem_free(vdc->vdc_perms, sizeof (uint8_t) *
vdc->vdc_children * vdc->vdc_nperms);
kmem_free(vdc, sizeof (*vdc));
}
static uint64_t
vdev_draid_nparity(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
return (vdc->vdc_nparity);
}
static uint64_t
vdev_draid_ndisks(vdev_t *vd)
{
vdev_draid_config_t *vdc = vd->vdev_tsd;
return (vdc->vdc_ndisks);
}
vdev_ops_t vdev_draid_ops = {
.vdev_op_init = vdev_draid_init,
.vdev_op_fini = vdev_draid_fini,
.vdev_op_open = vdev_draid_open,
.vdev_op_close = vdev_draid_close,
.vdev_op_asize = vdev_draid_asize,
.vdev_op_min_asize = vdev_draid_min_asize,
.vdev_op_min_alloc = vdev_draid_min_alloc,
.vdev_op_io_start = vdev_draid_io_start,
.vdev_op_io_done = vdev_draid_io_done,
.vdev_op_state_change = vdev_draid_state_change,
.vdev_op_need_resilver = vdev_draid_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_draid_xlate,
.vdev_op_rebuild_asize = vdev_draid_rebuild_asize,
.vdev_op_metaslab_init = vdev_draid_metaslab_init,
.vdev_op_config_generate = vdev_draid_config_generate,
.vdev_op_nparity = vdev_draid_nparity,
.vdev_op_ndisks = vdev_draid_ndisks,
.vdev_op_type = VDEV_TYPE_DRAID,
.vdev_op_leaf = B_FALSE,
};
/*
* A dRAID distributed spare is a virtual leaf vdev which is included in the
* parent dRAID configuration. The last N columns of the dRAID permutation
* table are used to determine on which dRAID children a specific offset
* should be written. These spare leaf vdevs can only be used to replace
* faulted children in the same dRAID configuration.
*/
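/*
 * For example (a hypothetical dRAID with nparity = 2, 23 children and
 * 2 distributed spares): the spares are named draid2-0-0 and
 * draid2-0-1, and for any given offset they map to permutation columns
 * 22 and 21 respectively (children - 1 - spare_id). Which physical
 * child that column resolves to changes every vdc_devslicesz bytes as
 * the permutation rotates, so a distributed spare's capacity is spread
 * across all 23 children.
 */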
/*
* Distributed spare state. All fields are set when the distributed spare is
* first opened and are immutable.
*/
typedef struct {
vdev_t *vds_draid_vdev; /* top-level parent dRAID vdev */
uint64_t vds_top_guid; /* top-level parent dRAID guid */
uint64_t vds_spare_id; /* spare id (0 - vdc->vdc_nspares-1) */
} vdev_draid_spare_t;
/*
* Returns the parent dRAID vdev to which the distributed spare belongs.
* This may be safely called even when the vdev is not open.
*/
vdev_t *
vdev_draid_spare_get_parent(vdev_t *vd)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
if (vds->vds_draid_vdev != NULL)
return (vds->vds_draid_vdev);
return (vdev_lookup_by_guid(vd->vdev_spa->spa_root_vdev,
vds->vds_top_guid));
}
/*
* A dRAID spare is active when it's the child of a vdev using the
* vdev_spare_ops, vdev_replacing_ops or vdev_draid_ops.
*/
static boolean_t
vdev_draid_spare_is_active(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
if (pvd != NULL && (pvd->vdev_ops == &vdev_spare_ops ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_draid_ops)) {
return (B_TRUE);
} else {
return (B_FALSE);
}
}
/*
* Given a dRAID distributed spare vdev, returns the physical child vdev
* on which the provided offset resides. This may involve recursing through
* multiple layers of distributed spares. Note that offset is relative to
* this vdev.
*/
vdev_t *
vdev_draid_spare_get_child(vdev_t *vd, uint64_t physical_offset)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
/* The vdev is closed */
if (vds->vds_draid_vdev == NULL)
return (NULL);
vdev_t *tvd = vds->vds_draid_vdev;
vdev_draid_config_t *vdc = tvd->vdev_tsd;
ASSERT3P(tvd->vdev_ops, ==, &vdev_draid_ops);
ASSERT3U(vds->vds_spare_id, <, vdc->vdc_nspares);
uint8_t *base;
uint64_t iter;
uint64_t perm = physical_offset / vdc->vdc_devslicesz;
vdev_draid_get_perm(vdc, perm, &base, &iter);
uint64_t cid = vdev_draid_permute_id(vdc, base, iter,
(tvd->vdev_children - 1) - vds->vds_spare_id);
vdev_t *cvd = tvd->vdev_child[cid];
if (cvd->vdev_ops == &vdev_draid_spare_ops)
return (vdev_draid_spare_get_child(cvd, physical_offset));
return (cvd);
}
-/* ARGSUSED */
static void
vdev_draid_spare_close(vdev_t *vd)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
vds->vds_draid_vdev = NULL;
}
/*
* Opening a dRAID spare device is done by looking up the associated dRAID
* top-level vdev guid from the spare configuration.
*/
static int
vdev_draid_spare_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
uint64_t asize, max_asize;
vdev_t *tvd = vdev_lookup_by_guid(rvd, vds->vds_top_guid);
if (tvd == NULL) {
/*
* When spa_vdev_add() is labeling new spares, the
* associated dRAID is not attached to the root vdev,
* nor does this spare have a parent. Simulate a valid
* device in order to allow the label to be initialized
* and the distributed spare added to the configuration.
*/
if (vd->vdev_parent == NULL) {
*psize = *max_psize = SPA_MINDEVSIZE;
*logical_ashift = *physical_ashift = ASHIFT_MIN;
return (0);
}
return (SET_ERROR(EINVAL));
}
vdev_draid_config_t *vdc = tvd->vdev_tsd;
if (tvd->vdev_ops != &vdev_draid_ops || vdc == NULL)
return (SET_ERROR(EINVAL));
if (vds->vds_spare_id >= vdc->vdc_nspares)
return (SET_ERROR(EINVAL));
/*
* Neither tvd->vdev_asize nor tvd->vdev_max_asize can be used here
* because the caller may be vdev_draid_open() in which case the
* values are stale as they haven't yet been updated by vdev_open().
* To avoid this always recalculate the dRAID asize and max_asize.
*/
vdev_draid_calculate_asize(tvd, &asize, &max_asize,
logical_ashift, physical_ashift);
*psize = asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*max_psize = max_asize + VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
vds->vds_draid_vdev = tvd;
return (0);
}
/*
* Completed distributed spare IO. Store the result in the parent zio
* as if it had performed the operation itself. Only the first error is
* preserved if there are multiple errors.
*/
static void
vdev_draid_spare_child_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
/*
* IOs are issued to non-writable vdevs in order to keep their
* DTLs accurate. However, we don't want to propagate the
* error into the distributed spare's DTL. When resilvering
* vdev_draid_need_resilver() will consult the relevant DTL
* to determine if the data is missing and must be repaired.
*/
if (!vdev_writeable(zio->io_vd))
return;
if (pio->io_error == 0)
pio->io_error = zio->io_error;
}
/*
* Returns a valid label nvlist for the distributed spare vdev. This is
* used to bypass the IO pipeline to avoid the complexity of constructing
* a complete label with valid checksum to return when read.
*/
nvlist_t *
vdev_draid_read_config_spare(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
uint64_t guid = vd->vdev_guid;
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_string(nv, ZPOOL_CONFIG_POOL_NAME, spa_name(spa));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_TXG, spa->spa_config_txg);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vd->vdev_top->vdev_guid);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_POOL_STATE,
vdev_draid_spare_is_active(vd) ?
POOL_STATE_ACTIVE : POOL_STATE_SPARE);
/* Set the vdev guid based on the spare list in sav_vdevs. */
for (int i = 0; i < sav->sav_count; i++) {
if (sav->sav_vdevs[i]->vdev_ops == &vdev_draid_spare_ops &&
strcmp(sav->sav_vdevs[i]->vdev_path, vd->vdev_path) == 0) {
guid = sav->sav_vdevs[i]->vdev_guid;
break;
}
}
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, guid);
return (nv);
}
/*
* Handle any ioctl requested of the distributed spare. Only flushes
* are supported, in which case all children must be flushed.
*/
static int
vdev_draid_spare_ioctl(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
int error = 0;
if (zio->io_cmd == DKIOCFLUSHWRITECACHE) {
for (int c = 0; c < vd->vdev_children; c++) {
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[c], zio->io_offset, zio->io_abd,
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
} else {
error = SET_ERROR(ENOTSUP);
}
return (error);
}
/*
* Initiate an IO to the distributed spare. For normal IOs this entails using
* the zio->io_offset and permutation table to calculate which child dRAID vdev
* is responsible for the data. The zio is then passed along to that child to
* perform the actual IO. The label ranges are not stored on disk and require
* some special handling which is described below.
*/
static void
vdev_draid_spare_io_start(zio_t *zio)
{
vdev_t *cvd = NULL, *vd = zio->io_vd;
vdev_draid_spare_t *vds = vd->vdev_tsd;
uint64_t offset = zio->io_offset - VDEV_LABEL_START_SIZE;
/*
* If the vdev is closed, it's likely in the REMOVED or FAULTED state.
* Nothing to be done here but return failure.
*/
if (vds == NULL) {
zio->io_error = ENXIO;
zio_interrupt(zio);
return;
}
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
zio->io_error = vdev_draid_spare_ioctl(zio);
break;
case ZIO_TYPE_WRITE:
if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
/*
* Accept probe IOs and config writers to simulate the
* existence of an on disk label. vdev_label_sync(),
* vdev_uberblock_sync() and vdev_copy_uberblocks()
* skip the distributed spares. This only leaves
* vdev_label_init() which is allowed to succeed to
* avoid adding special cases to the function.
*/
if (zio->io_flags & ZIO_FLAG_PROBE ||
zio->io_flags & ZIO_FLAG_CONFIG_WRITER) {
zio->io_error = 0;
} else {
zio->io_error = SET_ERROR(EIO);
}
} else {
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
}
break;
case ZIO_TYPE_READ:
if (VDEV_OFFSET_IS_LABEL(vd, zio->io_offset)) {
/*
* Accept probe IOs to simulate the existence of a
* label. vdev_label_read_config() bypasses the
* pipeline to read the label configuration and
* vdev_uberblock_load() skips distributed spares
* when attempting to locate the best uberblock.
*/
if (zio->io_flags & ZIO_FLAG_PROBE) {
zio->io_error = 0;
} else {
zio->io_error = SET_ERROR(EIO);
}
} else {
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL || !vdev_readable(cvd)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
}
break;
case ZIO_TYPE_TRIM:
/* The vdev label ranges are never trimmed */
ASSERT0(VDEV_OFFSET_IS_LABEL(vd, zio->io_offset));
cvd = vdev_draid_spare_get_child(vd, offset);
if (cvd == NULL || !cvd->vdev_has_trim) {
zio->io_error = SET_ERROR(ENXIO);
} else {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_draid_spare_child_done, zio));
}
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
zio_execute(zio);
}
-/* ARGSUSED */
static void
vdev_draid_spare_io_done(zio_t *zio)
{
+ (void) zio;
}
/*
* Lookup the full spare config in spa->spa_spares.sav_config and
* return the top_guid and spare_id for the named spare.
*/
static int
vdev_draid_spare_lookup(spa_t *spa, nvlist_t *nv, uint64_t *top_guidp,
uint64_t *spare_idp)
{
nvlist_t **spares;
uint_t nspares;
int error;
if ((spa->spa_spares.sav_config == NULL) ||
(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)) {
return (SET_ERROR(ENOENT));
}
char *spare_name;
error = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &spare_name);
if (error != 0)
return (SET_ERROR(EINVAL));
for (int i = 0; i < nspares; i++) {
nvlist_t *spare = spares[i];
uint64_t top_guid, spare_id;
char *type, *path;
/* Skip non-distributed spares */
error = nvlist_lookup_string(spare, ZPOOL_CONFIG_TYPE, &type);
if (error != 0 || strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0)
continue;
/* Skip spares with the wrong name */
error = nvlist_lookup_string(spare, ZPOOL_CONFIG_PATH, &path);
if (error != 0 || strcmp(path, spare_name) != 0)
continue;
/* Found the matching spare */
error = nvlist_lookup_uint64(spare,
ZPOOL_CONFIG_TOP_GUID, &top_guid);
if (error == 0) {
error = nvlist_lookup_uint64(spare,
ZPOOL_CONFIG_SPARE_ID, &spare_id);
}
if (error != 0) {
return (SET_ERROR(EINVAL));
} else {
*top_guidp = top_guid;
*spare_idp = spare_id;
return (0);
}
}
return (SET_ERROR(ENOENT));
}
/*
* Initialize private dRAID spare specific fields from the nvlist.
*/
static int
vdev_draid_spare_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
vdev_draid_spare_t *vds;
uint64_t top_guid = 0;
uint64_t spare_id;
/*
* In the normal case check the list of spares stored in the spa
* to look up the top_guid and spare_id for the provided spare config.
* When creating a new pool or adding vdevs the spare list is not
* yet populated and the values are provided in the passed config.
*/
if (vdev_draid_spare_lookup(spa, nv, &top_guid, &spare_id) != 0) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_SPARE_ID,
&spare_id) != 0)
return (SET_ERROR(EINVAL));
}
vds = kmem_alloc(sizeof (vdev_draid_spare_t), KM_SLEEP);
vds->vds_draid_vdev = NULL;
vds->vds_top_guid = top_guid;
vds->vds_spare_id = spare_id;
*tsd = vds;
return (0);
}
static void
vdev_draid_spare_fini(vdev_t *vd)
{
kmem_free(vd->vdev_tsd, sizeof (vdev_draid_spare_t));
}
static void
vdev_draid_spare_config_generate(vdev_t *vd, nvlist_t *nv)
{
vdev_draid_spare_t *vds = vd->vdev_tsd;
ASSERT3P(vd->vdev_ops, ==, &vdev_draid_spare_ops);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_TOP_GUID, vds->vds_top_guid);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_SPARE_ID, vds->vds_spare_id);
}
vdev_ops_t vdev_draid_spare_ops = {
.vdev_op_init = vdev_draid_spare_init,
.vdev_op_fini = vdev_draid_spare_fini,
.vdev_op_open = vdev_draid_spare_open,
.vdev_op_close = vdev_draid_spare_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_draid_spare_io_start,
.vdev_op_io_done = vdev_draid_spare_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_draid_spare_config_generate,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_DRAID_SPARE,
.vdev_op_leaf = B_TRUE,
};
diff --git a/sys/contrib/openzfs/module/zfs/vdev_indirect.c b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
index 3237dc4021a0..b541058d94f8 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_indirect.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_indirect.c
@@ -1,1912 +1,1910 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2014, 2020 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/abd.h>
#include <sys/zthr.h>
/*
* An indirect vdev corresponds to a vdev that has been removed. Since
* we cannot rewrite block pointers of snapshots, etc., we keep a
* mapping from old location on the removed device to the new location
* on another device in the pool and use this mapping whenever we need
* to access the DVA. Unfortunately, this mapping did not respect
* logical block boundaries when it was first created, and so a DVA on
* this indirect vdev may be "split" into multiple sections that each
* map to a different location. As a consequence, not all DVAs can be
* translated to an equivalent new DVA. Instead we must provide a
* "vdev_remap" operation that executes a callback on each contiguous
* segment of the new location. This function is used in multiple ways:
*
* - i/os to this vdev use the callback to determine where the
* data is now located, and issue child i/os for each segment's new
* location.
*
* - frees and claims to this vdev use the callback to free or claim
* each mapped segment. (Note that we don't actually need to claim
* log blocks on indirect vdevs, because we don't allocate to
* removing vdevs. However, zdb uses zio_claim() for its leak
* detection.)
*/
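/*
 * Sketch of the pattern (descriptive only, see the actual prototypes
 * below): callers hand vdev_op_remap a callback plus an opaque
 * argument; the callback is invoked once per contiguous mapped segment
 * with the target vdev, the offset and size of that segment, and the
 * opaque argument. I/O uses it to issue one child i/o per segment,
 * while frees and claims use it to free or claim each segment
 * individually.
 */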
/*
* "Big theory statement" for how we mark blocks obsolete.
*
* When a block on an indirect vdev is freed or remapped, a section of
* that vdev's mapping may no longer be referenced (aka "obsolete"). We
* keep track of how much of each mapping entry is obsolete. When
* an entry becomes completely obsolete, we can remove it, thus reducing
* the memory used by the mapping. The complete picture of obsolescence
* is given by the following data structures, described below:
* - the entry-specific obsolete count
* - the vdev-specific obsolete spacemap
* - the pool-specific obsolete bpobj
*
* == On disk data structures used ==
*
* We track the obsolete space for the pool using several objects. Each
* of these objects is created on demand and freed when no longer
* needed, and is assumed to be empty if it does not exist.
* SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
*
* - Each vic_mapping_object (associated with an indirect vdev) can
* have a vimp_counts_object. This is an array of uint32_t's
* with the same number of entries as the vic_mapping_object. When
* the mapping is condensed, entries from the vic_obsolete_sm_object
* (see below) are folded into the counts. Therefore, each
* obsolete_counts entry tells us the number of bytes in the
* corresponding mapping entry that were not referenced when the
* mapping was last condensed.
*
* - Each indirect or removing vdev can have a vic_obsolete_sm_object.
* This is a space map containing an alloc entry for every DVA that
* has been obsoleted since the last time this indirect vdev was
* condensed. We use this object in order to improve performance
* when marking a DVA as obsolete. Instead of modifying an arbitrary
* offset of the vimp_counts_object, we only need to append an entry
* to the end of this object. When a DVA becomes obsolete, it is
* added to the obsolete space map. This happens when the DVA is
* freed, remapped and not referenced by a snapshot, or the last
* snapshot referencing it is destroyed.
*
* - Each dataset can have a ds_remap_deadlist object. This is a
* deadlist object containing all blocks that were remapped in this
* dataset but referenced in a previous snapshot. Blocks can *only*
* appear on this list if they were remapped (dsl_dataset_block_remapped);
* blocks that were killed in a head dataset are put on the normal
* ds_deadlist and marked obsolete when they are freed.
*
* - The pool can have a dp_obsolete_bpobj. This is a list of blocks
* in the pool that need to be marked obsolete. When a snapshot is
* destroyed, we move some of the ds_remap_deadlist to the obsolete
* bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
* asynchronously process the obsolete bpobj, moving its entries to
* the specific vdevs' obsolete space maps.
*
* == Summary of how we mark blocks as obsolete ==
*
* - When freeing a block: if any DVA is on an indirect vdev, append to
* vic_obsolete_sm_object.
* - When remapping a block, add dva to ds_remap_deadlist (if prev snap
* references; otherwise append to vic_obsolete_sm_object).
* - When freeing a snapshot: move parts of ds_remap_deadlist to
* dp_obsolete_bpobj (same algorithm as ds_deadlist).
* - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
* individual vdev's vic_obsolete_sm_object.
*/
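/*
 * Illustrative sketch (pseudo-C, hedged; the real logic is spread across
 * the free, remap, snapshot-destroy and spa-sync paths rather than living
 * in a single function) of where a newly obsolete DVA is recorded, per the
 * summary above:
 *
 *	if (freeing a block && DVA is on an indirect vdev)
 *		append to that vdev's vic_obsolete_sm_object;
 *	else if (remapping a block)
 *		prev snapshot still references it ?
 *		    add to ds_remap_deadlist :
 *		    append to vic_obsolete_sm_object;
 *	else if (destroying a snapshot)
 *		move part of ds_remap_deadlist to dp_obsolete_bpobj;
 *	else if (syncing the spa)
 *		move dp_obsolete_bpobj ranges to the per-vdev
 *		vic_obsolete_sm_object;
 */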
/*
* "Big theory statement" for how we condense indirect vdevs.
*
* Condensing an indirect vdev's mapping is the process of determining
* the precise counts of obsolete space for each mapping entry (by
* integrating the obsolete spacemap into the obsolete counts) and
* writing out a new mapping that contains only referenced entries.
*
* We condense a vdev when we expect the mapping to shrink (see
* vdev_indirect_should_condense()), but only perform one condense at a
* time to limit the memory usage. In addition, we use a separate
* open-context thread (spa_condense_indirect_thread) to incrementally
* create the new mapping object in a way that minimizes the impact on
* the rest of the system.
*
* == Generating a new mapping ==
*
* To generate a new mapping, we follow these steps:
*
* 1. Save the old obsolete space map and create a new mapping object
* (see spa_condense_indirect_start_sync()). This initializes the
* spa_condensing_indirect_phys with the "previous obsolete space map",
* which is now read only. Newly obsolete DVAs will be added to a
* new (initially empty) obsolete space map, and will not be
* considered as part of this condense operation.
*
* 2. Construct in memory the precise counts of obsolete space for each
* mapping entry, by incorporating the obsolete space map into the
* counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
*
* 3. Iterate through each mapping entry, writing to the new mapping any
* entries that are not completely obsolete (i.e. which don't have
* obsolete count == mapping length). (See
* spa_condense_indirect_generate_new_mapping().)
*
* 4. Destroy the old mapping object and switch over to the new one
* (spa_condense_indirect_complete_sync).
*
* == Restarting from failure ==
*
* To restart the condense when we import/open the pool, we must start
* at the 2nd step above: reconstruct the precise counts in memory,
* based on the space map + counts. Then in the 3rd step, we start
* iterating where we left off: at vimp_max_offset of the new mapping
* object.
*/
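/*
 * A minimal sketch, hedged, of the restart computation described above
 * (this mirrors what spa_condense_indirect_thread() does later in this
 * file; it is not a second implementation):
 *
 *	max_offset = vdev_indirect_mapping_max_offset(new_mapping);
 *	if (max_offset == 0)
 *		start_index = 0;		// nothing written yet
 *	else if ((entry = vdev_indirect_mapping_entry_for_offset_or_next(
 *	    old_mapping, max_offset)) == NULL)
 *		start_index = UINT64_MAX;	// new mapping already complete
 *	else
 *		start_index = entry - old_mapping->vim_entries;
 */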
int zfs_condense_indirect_vdevs_enable = B_TRUE;
/*
* Condense if at least this percent of the bytes in the mapping is
* obsolete. With the default of 25%, the amount of space mapped
* will be reduced to 1% of its original size after at most 16
* condenses. Higher values will condense less often (causing less
* i/o); lower values will reduce the mapping size more quickly.
*/
int zfs_condense_indirect_obsolete_pct = 25;
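/*
 * Worked example for the "1% after at most 16 condenses" claim above
 * (assuming each condense fires right at the default 25% threshold, so at
 * most 75% of the mapped space survives each pass): 0.75^16 ~= 0.010,
 * i.e. roughly 1% of the original mapping remains.
 */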
/*
* Condense if the obsolete space map takes up more than this amount of
* space on disk (logically). This limits the amount of disk space
* consumed by the obsolete space map; the default of 1GB is small enough
* that we typically don't mind "wasting" it.
*/
unsigned long zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
/*
* Don't bother condensing if the mapping uses less than this amount of
* memory. The default of 128KB is considered a "trivial" amount of
* memory and not worth reducing.
*/
unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;
/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a condense (which might otherwise
* complete too quickly). If used to reduce the performance impact of
* condensing in production, a maximum value of 1 should be sufficient.
*/
int zfs_condense_indirect_commit_entry_delay_ms = 0;
/*
* If an indirect split block contains more than this many possible unique
* combinations when being reconstructed, consider it too computationally
* expensive to check them all. Instead, try at most this many
* randomly-selected combinations each time the block is accessed. This
* allows all segment copies to participate fairly in the reconstruction
* when all combinations cannot be checked and prevents repeated use of
* one bad copy.
*/
int zfs_reconstruct_indirect_combinations_max = 4096;
/*
* Enable to simulate damaged segments and validate reconstruction. This
* is intentionally not exposed as a module parameter.
*/
unsigned long zfs_reconstruct_indirect_damage_fraction = 0;
/*
* The indirect_child_t represents the vdev that we will read from, when we
* need to read all copies of the data (e.g. for scrub or reconstruction).
* For plain (non-mirror) top-level vdevs (i.e. is_vdev is not a mirror),
* ic_vdev is the same as is_vdev. However, for mirror top-level vdevs,
* ic_vdev is a child of the mirror.
*/
typedef struct indirect_child {
abd_t *ic_data;
vdev_t *ic_vdev;
/*
* ic_duplicate is NULL when the ic_data contents are unique; when it
* is determined to be a duplicate, it references the primary child.
*/
struct indirect_child *ic_duplicate;
list_node_t ic_node; /* node on is_unique_child */
int ic_error; /* set when a child does not contain the data */
} indirect_child_t;
/*
* The indirect_split_t represents one mapped segment of an i/o to the
* indirect vdev. For non-split (contiguously-mapped) blocks, there will be
* only one indirect_split_t, with is_split_offset==0 and is_size==io_size.
* For split blocks, there will be several of these.
*/
typedef struct indirect_split {
list_node_t is_node; /* link on iv_splits */
/*
* is_split_offset is the offset into the i/o.
* This is the sum of the previous splits' is_size's.
*/
uint64_t is_split_offset;
vdev_t *is_vdev; /* top-level vdev */
uint64_t is_target_offset; /* offset on is_vdev */
uint64_t is_size;
int is_children; /* number of entries in is_child[] */
int is_unique_children; /* number of entries in is_unique_child */
list_t is_unique_child;
/*
* is_good_child is the child that we are currently using to
* attempt reconstruction.
*/
indirect_child_t *is_good_child;
indirect_child_t is_child[1]; /* variable-length */
} indirect_split_t;
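/*
 * Hedged example of the is_split_offset relationship described above, for
 * a hypothetical three-way split of a 48KB i/o (sizes are illustrative
 * only):
 *
 *	split	is_size		is_split_offset
 *	  0	16KB		 0
 *	  1	 8KB		16KB
 *	  2	24KB		24KB
 *
 * Each is_split_offset is the running sum of the preceding is_size values,
 * and the is_size values sum to the original io_size.
 */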
/*
* The indirect_vsd_t is associated with each i/o to the indirect vdev.
* It is the "Vdev-Specific Data" in the zio_t's io_vsd.
*/
typedef struct indirect_vsd {
boolean_t iv_split_block;
boolean_t iv_reconstruct;
uint64_t iv_unique_combinations;
uint64_t iv_attempts;
uint64_t iv_attempts_max;
list_t iv_splits; /* list of indirect_split_t's */
} indirect_vsd_t;
static void
vdev_indirect_map_free(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
indirect_split_t *is;
while ((is = list_head(&iv->iv_splits)) != NULL) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data != NULL)
abd_free(ic->ic_data);
}
list_remove(&iv->iv_splits, is);
indirect_child_t *ic;
while ((ic = list_head(&is->is_unique_child)) != NULL)
list_remove(&is->is_unique_child, ic);
list_destroy(&is->is_unique_child);
kmem_free(is,
offsetof(indirect_split_t, is_child[is->is_children]));
}
kmem_free(iv, sizeof (*iv));
}
static const zio_vsd_ops_t vdev_indirect_vsd_ops = {
.vsd_free = vdev_indirect_map_free,
};
/*
* Mark the given offset and size as being obsolete.
*/
void
vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(size > 0);
VERIFY(vdev_indirect_mapping_entry_for_offset(
vd->vdev_indirect_mapping, offset) != NULL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
mutex_enter(&vd->vdev_obsolete_lock);
range_tree_add(vd->vdev_obsolete_segments, offset, size);
mutex_exit(&vd->vdev_obsolete_lock);
vdev_dirty(vd, 0, NULL, spa_syncing_txg(spa));
}
}
/*
* Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
* wrapper is provided because the DMU does not know about vdev_t's and
* cannot directly call vdev_indirect_mark_obsolete.
*/
void
spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
ASSERT(dmu_tx_is_syncing(tx));
/* The DMU can only remap indirect vdevs. */
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vdev_indirect_mark_obsolete(vd, offset, size);
}
static spa_condensing_indirect_t *
spa_condensing_indirect_create(spa_t *spa)
{
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
objset_t *mos = spa->spa_meta_objset;
for (int i = 0; i < TXG_SIZE; i++) {
list_create(&sci->sci_new_mapping_entries[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
sci->sci_new_mapping =
vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
return (sci);
}
static void
spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
{
for (int i = 0; i < TXG_SIZE; i++)
list_destroy(&sci->sci_new_mapping_entries[i]);
if (sci->sci_new_mapping != NULL)
vdev_indirect_mapping_close(sci->sci_new_mapping);
kmem_free(sci, sizeof (*sci));
}
boolean_t
vdev_indirect_should_condense(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
if (!zfs_condense_indirect_vdevs_enable)
return (B_FALSE);
/*
* We can only condense one indirect vdev at a time.
*/
if (spa->spa_condensing_indirect != NULL)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
/*
* The mapping object size must not change while we are
* condensing, so we can only condense indirect vdevs
* (not vdevs that are still in the middle of being removed).
*/
if (vd->vdev_ops != &vdev_indirect_ops)
return (B_FALSE);
/*
* If nothing new has been marked obsolete, there is no
* point in condensing.
*/
uint64_t obsolete_sm_obj __maybe_unused;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
if (vd->vdev_obsolete_sm == NULL) {
ASSERT0(obsolete_sm_obj);
return (B_FALSE);
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_obj, ==, space_map_object(vd->vdev_obsolete_sm));
uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
uint64_t mapping_size = vdev_indirect_mapping_size(vim);
uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
ASSERT3U(bytes_obsolete, <=, bytes_mapped);
/*
* If a high percentage of the bytes that are mapped have become
* obsolete, condense (unless the mapping is already small enough).
* This has a good chance of reducing the amount of memory used
* by the mapping.
*/
if (bytes_obsolete * 100 / bytes_mapped >=
zfs_condense_indirect_obsolete_pct &&
mapping_size > zfs_condense_min_mapping_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete "
"spacemap covers %d%% of %lluMB mapping",
(u_longlong_t)vd->vdev_id,
(int)(bytes_obsolete * 100 / bytes_mapped),
(u_longlong_t)bytes_mapped / 1024 / 1024);
return (B_TRUE);
}
/*
* If the obsolete space map takes up too much space on disk,
* condense in order to free up this disk space.
*/
if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
zfs_dbgmsg("should condense vdev %llu because obsolete sm "
"length %lluMB >= max size %lluMB",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)obsolete_sm_size / 1024 / 1024,
(u_longlong_t)zfs_condense_max_obsolete_bytes /
1024 / 1024);
return (B_TRUE);
}
return (B_FALSE);
}
/*
* This sync task completes (finishes) a condense, deleting the old
* mapping and replacing it with the new one.
*/
static void
spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_meta_objset;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
uint64_t new_count =
vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
/*
* Reset vdev_indirect_mapping to refer to the new object.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = sci->sci_new_mapping;
rw_exit(&vd->vdev_indirect_rwlock);
sci->sci_new_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = scip->scip_next_mapping_object;
scip->scip_next_mapping_object = 0;
space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
scip->scip_prev_obsolete_sm_object = 0;
scip->scip_vdev = 0;
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, tx));
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
"new mapping object %llu has %llu entries "
"(was %llu entries)",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)vic->vic_mapping_object,
(u_longlong_t)new_count, (u_longlong_t)old_count);
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* This sync task appends entries to the new mapping object.
*/
static void
spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
{
spa_condensing_indirect_t *sci = arg;
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa __maybe_unused = dmu_tx_pool(tx)->dp_spa;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(sci, ==, spa->spa_condensing_indirect);
vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
&sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
}
/*
* Open-context function to add one entry to the new mapping. The new
* entry will be remembered and written from syncing context.
*/
static void
spa_condense_indirect_commit_entry(spa_t *spa,
vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
/*
* If we are the first entry committed this txg, kick off the sync
* task to write to the MOS on our behalf.
*/
if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
dsl_sync_task_nowait(dmu_tx_pool(tx),
spa_condense_indirect_commit_sync, sci, tx);
}
vdev_indirect_mapping_entry_t *vime =
kmem_alloc(sizeof (*vime), KM_SLEEP);
vime->vime_mapping = *vimep;
vime->vime_obsolete_count = count;
list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
dmu_tx_commit(tx);
}
static void
spa_condense_indirect_generate_new_mapping(vdev_t *vd,
uint32_t *obsolete_counts, uint64_t start_index, zthr_t *zthr)
{
spa_t *spa = vd->vdev_spa;
uint64_t mapi = start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
uint64_t old_num_entries =
vdev_indirect_mapping_num_entries(old_mapping);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
zfs_dbgmsg("starting condense of vdev %llu from index %llu",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
while (mapi < old_num_entries) {
if (zthr_iscancelled(zthr)) {
zfs_dbgmsg("pausing condense of vdev %llu "
"at index %llu", (u_longlong_t)vd->vdev_id,
(u_longlong_t)mapi);
break;
}
vdev_indirect_mapping_entry_phys_t *entry =
&old_mapping->vim_entries[mapi];
uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
ASSERT3U(obsolete_counts[mapi], <=, entry_size);
if (obsolete_counts[mapi] < entry_size) {
spa_condense_indirect_commit_entry(spa, entry,
obsolete_counts[mapi]);
/*
* This delay may be requested for testing, debugging,
* or performance reasons.
*/
hrtime_t now = gethrtime();
hrtime_t sleep_until = now + MSEC2NSEC(
zfs_condense_indirect_commit_entry_delay_ms);
zfs_sleep_until(sleep_until);
}
mapi++;
}
}
-/* ARGSUSED */
static boolean_t
spa_condense_indirect_thread_check(void *arg, zthr_t *zthr)
{
+ (void) zthr;
spa_t *spa = arg;
return (spa->spa_condensing_indirect != NULL);
}
-/* ARGSUSED */
static void
spa_condense_indirect_thread(void *arg, zthr_t *zthr)
{
spa_t *spa = arg;
vdev_t *vd;
ASSERT3P(spa->spa_condensing_indirect, !=, NULL);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(spa, spa->spa_condensing_indirect_phys.scip_vdev);
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint32_t *counts;
uint64_t start_index;
vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
space_map_t *prev_obsolete_sm = NULL;
ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
ASSERT(scip->scip_next_mapping_object != 0);
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
/*
* The list must start out empty in order for the
* _commit_sync() sync task to be properly registered
* on the first call to _commit_entry(); so it's wise
* to double check and ensure we actually are starting
* with empty lists.
*/
ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
}
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
if (prev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
counts, prev_obsolete_sm);
}
space_map_close(prev_obsolete_sm);
/*
* Generate new mapping. Determine what index to continue from
* based on the max offset that we've already written in the
* new mapping.
*/
uint64_t max_offset =
vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
if (max_offset == 0) {
/* We haven't written anything to the new mapping yet. */
start_index = 0;
} else {
/*
* Pick up from where we left off. _entry_for_offset()
* returns a pointer into the vim_entries array. If
* max_offset is greater than any of the mappings
* contained in the table, NULL will be returned, and
* that indicates we've exhausted our iteration of the
* old_mapping.
*/
vdev_indirect_mapping_entry_phys_t *entry =
vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
max_offset);
if (entry == NULL) {
/*
* We've already written the whole new mapping.
* This special value will cause us to skip the
* generate_new_mapping step and just do the sync
* task to complete the condense.
*/
start_index = UINT64_MAX;
} else {
start_index = entry - old_mapping->vim_entries;
ASSERT3U(start_index, <,
vdev_indirect_mapping_num_entries(old_mapping));
}
}
spa_condense_indirect_generate_new_mapping(vd, counts,
start_index, zthr);
vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
/*
* If the zthr has received a cancellation signal while running
* in generate_new_mapping() or at any point after that, then bail
* early. We don't want to complete the condense if the spa is
* shutting down.
*/
if (zthr_iscancelled(zthr))
return;
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
spa_condense_indirect_complete_sync, sci, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED));
}
/*
* Sync task to begin the condensing process.
*/
void
spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
ASSERT0(scip->scip_next_mapping_object);
ASSERT0(scip->scip_prev_obsolete_sm_object);
ASSERT0(scip->scip_vdev);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
uint64_t obsolete_sm_obj;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_obj));
ASSERT3U(obsolete_sm_obj, !=, 0);
scip->scip_vdev = vd->vdev_id;
scip->scip_next_mapping_object =
vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
/*
* We don't need to allocate a new space map object, since
* vdev_indirect_sync_obsolete will allocate one when needed.
*/
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (*scip) / sizeof (uint64_t), scip, tx));
ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
"posm=%llu nm=%llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)scip->scip_prev_obsolete_sm_object,
(u_longlong_t)scip->scip_next_mapping_object);
zthr_wakeup(spa->spa_condense_zthr);
}
/*
* Sync to the given vdev's obsolete space map any segments that are no longer
* referenced as of the given txg.
*
* If the obsolete space map doesn't exist yet, create and open it.
*/
void
vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
ASSERT3U(vic->vic_mapping_object, !=, 0);
ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object == 0) {
obsolete_sm_object = space_map_alloc(spa->spa_meta_objset,
zfs_vdev_standard_sm_blksz, tx);
ASSERT(vd->vdev_top_zap != 0);
VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
ASSERT3U(obsolete_sm_object, !=, 0);
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
spa->spa_meta_objset, obsolete_sm_object,
0, vd->vdev_asize, 0));
}
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_write(vd->vdev_obsolete_sm,
vd->vdev_obsolete_segments, SM_ALLOC, SM_NO_VDEVID, tx);
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
}
int
spa_condense_init(spa_t *spa)
{
int error = zap_lookup(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
&spa->spa_condensing_indirect_phys);
if (error == 0) {
if (spa_writeable(spa)) {
spa->spa_condensing_indirect =
spa_condensing_indirect_create(spa);
}
return (0);
} else if (error == ENOENT) {
return (0);
} else {
return (error);
}
}
void
spa_condense_fini(spa_t *spa)
{
if (spa->spa_condensing_indirect != NULL) {
spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
spa->spa_condensing_indirect = NULL;
}
}
void
spa_start_indirect_condensing_thread(spa_t *spa)
{
ASSERT3P(spa->spa_condense_zthr, ==, NULL);
spa->spa_condense_zthr = zthr_create("z_indirect_condense",
spa_condense_indirect_thread_check,
spa_condense_indirect_thread, spa, minclsyspri);
}
/*
* Gets the obsolete spacemap object from the vdev's ZAP. On success sm_obj
* will contain either the obsolete spacemap object or zero if none exists.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_sm_object(vdev_t *vd, uint64_t *sm_obj)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*sm_obj = 0;
return (0);
}
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (uint64_t), 1, sm_obj);
if (error == ENOENT) {
*sm_obj = 0;
error = 0;
}
return (error);
}
/*
* Checks the vdev's ZAP to determine whether the obsolete counts are
* precise. On success, are_precise is set to reflect whether they are.
* All other errors are returned to the caller.
*/
int
vdev_obsolete_counts_are_precise(vdev_t *vd, boolean_t *are_precise)
{
ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_top_zap == 0) {
*are_precise = B_FALSE;
return (0);
}
uint64_t val = 0;
int error = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
if (error == 0) {
*are_precise = (val != 0);
} else if (error == ENOENT) {
*are_precise = B_FALSE;
error = 0;
}
return (error);
}
-/* ARGSUSED */
static void
vdev_indirect_close(vdev_t *vd)
{
+ (void) vd;
}
-/* ARGSUSED */
static int
vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
*psize = *max_psize = vd->vdev_asize +
VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
*logical_ashift = vd->vdev_ashift;
*physical_ashift = vd->vdev_physical_ashift;
return (0);
}
typedef struct remap_segment {
vdev_t *rs_vd;
uint64_t rs_offset;
uint64_t rs_asize;
uint64_t rs_split_offset;
list_node_t rs_node;
} remap_segment_t;
static remap_segment_t *
rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
{
remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
rs->rs_vd = vd;
rs->rs_offset = offset;
rs->rs_asize = asize;
rs->rs_split_offset = split_offset;
return (rs);
}
/*
* Given an indirect vdev and an extent on that vdev, it duplicates the
* physical entries of the indirect mapping that correspond to the extent
* to a new array and returns a pointer to it. In addition, copied_entries
* is populated with the number of mapping entries that were duplicated.
*
* Note that the function assumes that the caller holds vdev_indirect_rwlock.
* This ensures that the mapping won't change due to condensing as we
* copy over its contents.
*
* Finally, since we are doing an allocation, it is up to the caller to
* free the array allocated in this function.
*/
static vdev_indirect_mapping_entry_phys_t *
vdev_indirect_mapping_duplicate_adjacent_entries(vdev_t *vd, uint64_t offset,
uint64_t asize, uint64_t *copied_entries)
{
vdev_indirect_mapping_entry_phys_t *duplicate_mappings = NULL;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t entries = 0;
ASSERT(RW_READ_HELD(&vd->vdev_indirect_rwlock));
vdev_indirect_mapping_entry_phys_t *first_mapping =
vdev_indirect_mapping_entry_for_offset(vim, offset);
ASSERT3P(first_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *m = first_mapping;
while (asize > 0) {
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
ASSERT3U(offset, >=, DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(offset, <, DVA_MAPPING_GET_SRC_OFFSET(m) + size);
uint64_t inner_offset = offset - DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size = MIN(asize, size - inner_offset);
offset += inner_size;
asize -= inner_size;
entries++;
m++;
}
size_t copy_length = entries * sizeof (*first_mapping);
duplicate_mappings = kmem_alloc(copy_length, KM_SLEEP);
bcopy(first_mapping, duplicate_mappings, copy_length);
*copied_entries = entries;
return (duplicate_mappings);
}
/*
* Goes through the relevant indirect mappings until it hits a concrete vdev
* and issues the callback. On the way to the concrete vdev, if any other
* indirect vdevs are encountered, then the callback will also be called on
* each of those indirect vdevs. For example, if the segment is mapped to
* segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
* mapped to segment B on concrete vdev 2, then the callback will be called on
* both vdev 1 and vdev 2.
*
* While the callback passed to vdev_indirect_remap() is called on every vdev
* the function encounters, certain callbacks only care about concrete vdevs.
* These types of callbacks should return immediately and explicitly when they
* are called on an indirect vdev.
*
* Because there is a possibility that a DVA section in the indirect device
* has been split into multiple sections in our mapping, we keep track
* of the relevant contiguous segments of the new location (remap_segment_t)
* in a stack. This way we can call the callback for each of the new sections
* created by a single section of the indirect device. Note though, that in
* this scenario the callbacks in each split block won't occur in-order in
* terms of offset, so callers should not make any assumptions about that.
*
* For callbacks that don't handle split blocks and immediately return when
* they encounter them (as is the case for remap_blkptr_cb), the caller can
* assume that its callback will be applied from the first indirect vdev
* encountered to the last one and then the concrete vdev, in that order.
*/
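/*
 * Hedged walk-through of the example above in terms of the remap_segment_t
 * stack used by vdev_indirect_remap() (illustrative ordering only):
 *
 *	push rs(original indirect vdev, offset, asize)
 *	pop rs -> its mapping points at indirect vdev 1:
 *	    func(..., vdev 1, ...); push rs(vdev 1, mapped offset, size)
 *	pop rs(vdev 1) -> its mapping points at concrete vdev 2:
 *	    func(..., vdev 2, ...); nothing further is pushed
 *
 * Every destination vdev reached along the way receives the callback;
 * only indirect destinations are pushed back onto the stack.
 */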
static void
vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
{
list_t stack;
spa_t *spa = vd->vdev_spa;
list_create(&stack, sizeof (remap_segment_t),
offsetof(remap_segment_t, rs_node));
for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
rs != NULL; rs = list_remove_head(&stack)) {
vdev_t *v = rs->rs_vd;
uint64_t num_entries = 0;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
ASSERT(rs->rs_asize > 0);
/*
* Note: As this function can be called from open context
* (e.g. zio_read()), we need the following rwlock to
* prevent the mapping from being changed by condensing.
*
* So we grab the lock and we make a copy of the entries
* that are relevant to the extent that we are working on.
* Once that is done, we drop the lock and iterate over
* our copy of the mapping. Once we are done with
* the remap segment and we free it, we also free our copy
* of the indirect mapping entries that are relevant to it.
*
* This way we don't need to wait until the function is
* finished with a segment, to condense it. In addition, we
* don't need a recursive rwlock for the case that a call to
* vdev_indirect_remap() needs to call itself (through the
* codepath of its callback) for the same vdev in the middle
* of its execution.
*/
rw_enter(&v->vdev_indirect_rwlock, RW_READER);
ASSERT3P(v->vdev_indirect_mapping, !=, NULL);
vdev_indirect_mapping_entry_phys_t *mapping =
vdev_indirect_mapping_duplicate_adjacent_entries(v,
rs->rs_offset, rs->rs_asize, &num_entries);
ASSERT3P(mapping, !=, NULL);
ASSERT3U(num_entries, >, 0);
rw_exit(&v->vdev_indirect_rwlock);
for (uint64_t i = 0; i < num_entries; i++) {
/*
* Note: the vdev_indirect_mapping can not change
* while we are running. It only changes while the
* removal is in progress, and then only from syncing
* context. While a removal is in progress, this
* function is only called for frees, which also only
* happen from syncing context.
*/
vdev_indirect_mapping_entry_phys_t *m = &mapping[i];
ASSERT3P(m, !=, NULL);
ASSERT3U(rs->rs_asize, >, 0);
uint64_t size = DVA_GET_ASIZE(&m->vimep_dst);
uint64_t dst_offset = DVA_GET_OFFSET(&m->vimep_dst);
uint64_t dst_vdev = DVA_GET_VDEV(&m->vimep_dst);
ASSERT3U(rs->rs_offset, >=,
DVA_MAPPING_GET_SRC_OFFSET(m));
ASSERT3U(rs->rs_offset, <,
DVA_MAPPING_GET_SRC_OFFSET(m) + size);
ASSERT3U(dst_vdev, !=, v->vdev_id);
uint64_t inner_offset = rs->rs_offset -
DVA_MAPPING_GET_SRC_OFFSET(m);
uint64_t inner_size =
MIN(rs->rs_asize, size - inner_offset);
vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
ASSERT3P(dst_v, !=, NULL);
if (dst_v->vdev_ops == &vdev_indirect_ops) {
list_insert_head(&stack,
rs_alloc(dst_v, dst_offset + inner_offset,
inner_size, rs->rs_split_offset));
}
if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
/*
* Note: This clause exists only solely for
* testing purposes. We use it to ensure that
* split blocks work and that the callbacks
* using them yield the same result if issued
* in reverse order.
*/
uint64_t inner_half = inner_size / 2;
func(rs->rs_split_offset + inner_half, dst_v,
dst_offset + inner_offset + inner_half,
inner_half, arg);
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_half, arg);
} else {
func(rs->rs_split_offset, dst_v,
dst_offset + inner_offset,
inner_size, arg);
}
rs->rs_offset += inner_size;
rs->rs_asize -= inner_size;
rs->rs_split_offset += inner_size;
}
VERIFY0(rs->rs_asize);
kmem_free(mapping, num_entries * sizeof (*mapping));
kmem_free(rs, sizeof (remap_segment_t));
}
list_destroy(&stack);
}
static void
vdev_indirect_child_io_done(zio_t *zio)
{
zio_t *pio = zio->io_private;
mutex_enter(&pio->io_lock);
pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
mutex_exit(&pio->io_lock);
abd_free(zio->io_abd);
}
/*
* This is a callback for vdev_indirect_remap() which allocates an
* indirect_split_t for each split segment and adds it to iv_splits.
*/
static void
vdev_indirect_gather_splits(uint64_t split_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
zio_t *zio = arg;
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3P(vd, !=, NULL);
if (vd->vdev_ops == &vdev_indirect_ops)
return;
int n = 1;
if (vd->vdev_ops == &vdev_mirror_ops)
n = vd->vdev_children;
indirect_split_t *is =
kmem_zalloc(offsetof(indirect_split_t, is_child[n]), KM_SLEEP);
is->is_children = n;
is->is_size = size;
is->is_split_offset = split_offset;
is->is_target_offset = offset;
is->is_vdev = vd;
list_create(&is->is_unique_child, sizeof (indirect_child_t),
offsetof(indirect_child_t, ic_node));
/*
* Note that we only consider multiple copies of the data for
* *mirror* vdevs. We don't for "replacing" or "spare" vdevs, even
* though they use the same ops as mirror, because there's only one
* "good" copy under the replacing/spare.
*/
if (vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < n; i++) {
is->is_child[i].ic_vdev = vd->vdev_child[i];
list_link_init(&is->is_child[i].ic_node);
}
} else {
is->is_child[0].ic_vdev = vd;
}
list_insert_tail(&iv->iv_splits, is);
}
static void
vdev_indirect_read_split_done(zio_t *zio)
{
indirect_child_t *ic = zio->io_private;
if (zio->io_error != 0) {
/*
* Clear ic_data to indicate that we do not have data for this
* child.
*/
abd_free(ic->ic_data);
ic->ic_data = NULL;
}
}
/*
* Issue reads for all copies (mirror children) of all splits.
*/
static void
vdev_indirect_read_all(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (!vdev_readable(ic->ic_vdev))
continue;
/*
* If a child is missing the data, set ic_error. Used
* in vdev_indirect_repair(). We perform the read
* nevertheless which provides the opportunity to
* reconstruct the split block if at all possible.
*/
if (vdev_dtl_contains(ic->ic_vdev, DTL_MISSING,
zio->io_txg, 1))
ic->ic_error = SET_ERROR(ESTALE);
ic->ic_data = abd_alloc_sametype(zio->io_abd,
is->is_size);
ic->ic_duplicate = NULL;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset, ic->ic_data,
is->is_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_read_split_done, ic));
}
}
iv->iv_reconstruct = B_TRUE;
}
static void
vdev_indirect_io_start(zio_t *zio)
{
spa_t *spa __maybe_unused = zio->io_spa;
indirect_vsd_t *iv = kmem_zalloc(sizeof (*iv), KM_SLEEP);
list_create(&iv->iv_splits,
sizeof (indirect_split_t), offsetof(indirect_split_t, is_node));
zio->io_vsd = iv;
zio->io_vsd_ops = &vdev_indirect_vsd_ops;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (zio->io_type != ZIO_TYPE_READ) {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
/*
* Note: this code can handle other kinds of writes,
* but we don't expect them.
*/
ASSERT((zio->io_flags & (ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
}
vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
vdev_indirect_gather_splits, zio);
indirect_split_t *first = list_head(&iv->iv_splits);
if (first->is_size == zio->io_size) {
/*
* This is not a split block; we are pointing to the entire
* data, which will checksum the same as the original data.
* Pass the BP down so that the child i/o can verify the
* checksum, and try a different location if available
* (e.g. on a mirror).
*
* While this special case could be handled the same as the
* general (split block) case, doing it this way ensures
* that the vast majority of blocks on indirect vdevs
* (which are not split) are handled identically to blocks
* on non-indirect vdevs. This allows us to be less strict
* about performance in the general (but rare) case.
*/
ASSERT0(first->is_split_offset);
ASSERT3P(list_next(&iv->iv_splits, first), ==, NULL);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
first->is_vdev, first->is_target_offset,
abd_get_offset(zio->io_abd, 0),
zio->io_size, zio->io_type, zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
} else {
iv->iv_split_block = B_TRUE;
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) {
/*
* Read all copies. Note that for simplicity,
* we don't bother consulting the DTL in the
* resilver case.
*/
vdev_indirect_read_all(zio);
} else {
/*
* If this is a read zio, we read one copy of each
* split segment, from the top-level vdev. Since
* we don't know the checksum of each split
* individually, the child zio can't ensure that
* we get the right data. E.g. if it's a mirror,
* it will just read from a random (healthy) leaf
* vdev. We have to verify the checksum in
* vdev_indirect_io_done().
*
* For write zios, the vdev code will ensure we write
* to all children.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
zio_nowait(zio_vdev_child_io(zio, NULL,
is->is_vdev, is->is_target_offset,
abd_get_offset(zio->io_abd,
is->is_split_offset), is->is_size,
zio->io_type, zio->io_priority, 0,
vdev_indirect_child_io_done, zio));
}
}
}
zio_execute(zio);
}
/*
* Report a checksum error for a child.
*/
static void
vdev_indirect_checksum_error(zio_t *zio,
indirect_split_t *is, indirect_child_t *ic)
{
vdev_t *vd = ic->ic_vdev;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
zio_bad_cksum_t zbc = {{{ 0 }}};
abd_t *bad_abd = ic->ic_data;
abd_t *good_abd = is->is_good_child->ic_data;
(void) zfs_ereport_post_checksum(zio->io_spa, vd, NULL, zio,
is->is_target_offset, is->is_size, good_abd, bad_abd, &zbc);
}
/*
* Issue repair i/os for any incorrect copies. We do this by comparing
* each split segment's correct data (is_good_child's ic_data) with each
* other copy of the data. If they differ, then we overwrite the bad data
* with the good copy. The DTL is checked in vdev_indirect_read_all() and
* if a vdev is missing a copy of the data we set ic_error and the read is
* performed. This provides the opportunity to reconstruct the split block
* if at all possible. ic_error is checked here and if set it suppresses
* incrementing the checksum counter. Aside from this, DTLs are not checked,
* which simplifies this code and also issues the optimal number of writes
* (based on which copies actually read bad data, as opposed to which we
* think might be wrong). For the same reason, we always use
* ZIO_FLAG_SELF_HEAL, to bypass the DTL check in zio_vdev_io_start().
*/
static void
vdev_indirect_repair(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
enum zio_flag flags = ZIO_FLAG_IO_REPAIR;
if (!(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))
flags |= ZIO_FLAG_SELF_HEAL;
if (!spa_writeable(zio->io_spa))
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
if (ic->ic_duplicate == is->is_good_child)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
ic->ic_vdev, is->is_target_offset,
is->is_good_child->ic_data, is->is_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | ZIO_FLAG_SELF_HEAL,
NULL, NULL));
/*
* If ic_error is set the current child does not have
* a copy of the data, so suppress incrementing the
* checksum counter.
*/
if (ic->ic_error == ESTALE)
continue;
vdev_indirect_checksum_error(zio, is, ic);
}
}
}
/*
* Report checksum errors on all children that we read from.
*/
static void
vdev_indirect_all_checksum_errors(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
return;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic->ic_data == NULL)
continue;
vdev_t *vd = ic->ic_vdev;
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
NULL, zio, is->is_target_offset, is->is_size,
NULL, NULL, NULL);
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
}
}
}
/*
* Copy data from all the splits to a main zio, then validate the checksum.
* If the checksum is successfully validated, return success.
*/
static int
vdev_indirect_splits_checksum_validate(indirect_vsd_t *iv, zio_t *zio)
{
zio_bad_cksum_t zbc;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
ASSERT3P(is->is_good_child->ic_data, !=, NULL);
ASSERT3P(is->is_good_child->ic_duplicate, ==, NULL);
abd_copy_off(zio->io_abd, is->is_good_child->ic_data,
is->is_split_offset, 0, is->is_size);
}
return (zio_checksum_error(zio, &zbc));
}
/*
* There are relatively few possible combinations making it feasible to
* deterministically check them all. We do this by setting each split's
* good_child to the next entry on its unique-child list. When we reach the
* end of a list, we "carry over" to the next split (like counting in base
* is_unique_children, but each digit can have a different base).
*/
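/*
 * Hedged example: with two splits whose unique-child lists hold 2 and 3
 * entries respectively, the (first, second) good-child indices tried are
 *	(0,0) (1,0) (0,1) (1,1) (0,2) (1,2)
 * i.e. the split nearest the head of iv_splits is the fastest-varying
 * "digit", and at most 2 * 3 = 6 checksum attempts are made.
 */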
static int
vdev_indirect_splits_enumerate_all(indirect_vsd_t *iv, zio_t *zio)
{
boolean_t more = B_TRUE;
iv->iv_attempts = 0;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is))
is->is_good_child = list_head(&is->is_unique_child);
while (more == B_TRUE) {
iv->iv_attempts++;
more = B_FALSE;
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_good_child = list_next(&is->is_unique_child,
is->is_good_child);
if (is->is_good_child != NULL) {
more = B_TRUE;
break;
}
is->is_good_child = list_head(&is->is_unique_child);
}
}
ASSERT3S(iv->iv_attempts, <=, iv->iv_unique_combinations);
return (SET_ERROR(ECKSUM));
}
/*
* There are too many combinations to try all of them in a reasonable amount
* of time. So try a fixed number of random combinations from the unique
* split versions, after which we'll consider the block unrecoverable.
*/
static int
vdev_indirect_splits_enumerate_randomly(indirect_vsd_t *iv, zio_t *zio)
{
iv->iv_attempts = 0;
while (iv->iv_attempts < iv->iv_attempts_max) {
iv->iv_attempts++;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic = list_head(&is->is_unique_child);
int children = is->is_unique_children;
for (int i = random_in_range(children); i > 0; i--)
ic = list_next(&is->is_unique_child, ic);
ASSERT3P(ic, !=, NULL);
is->is_good_child = ic;
}
if (vdev_indirect_splits_checksum_validate(iv, zio) == 0)
return (0);
}
return (SET_ERROR(ECKSUM));
}
/*
* This is a validation function for reconstruction. It randomly selects
* a good combination, if one can be found, and then it intentionally
* damages all other segment copies by zeroing them. This forces the
* reconstruction algorithm to locate the one remaining known good copy.
*/
static int
vdev_indirect_splits_damage(indirect_vsd_t *iv, zio_t *zio)
{
int error;
/* Presume all the copies are unique for initial selection. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic = &is->is_child[i];
if (ic->ic_data != NULL) {
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic);
}
}
if (list_is_empty(&is->is_unique_child)) {
error = SET_ERROR(EIO);
goto out;
}
}
/*
* Set each is_good_child to a randomly-selected child which
* is known to contain validated data.
*/
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error)
goto out;
/*
* Damage every copy except the known good one by zeroing it. This will
* result in at most two unique copies per indirect_split_t.
* Both may need to be checked in order to reconstruct the block.
* Set iv->iv_attempts_max such that all unique combinations will be
* enumerated, but limit the damage to at most 12 indirect splits.
*/
iv->iv_attempts_max = 1;
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
for (int c = 0; c < is->is_children; c++) {
indirect_child_t *ic = &is->is_child[c];
if (ic == is->is_good_child)
continue;
if (ic->ic_data == NULL)
continue;
abd_zero(ic->ic_data, abd_get_size(ic->ic_data));
}
iv->iv_attempts_max *= 2;
if (iv->iv_attempts_max >= (1ULL << 12)) {
iv->iv_attempts_max = UINT64_MAX;
break;
}
}
out:
/* Empty the unique children lists so they can be reconstructed. */
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
indirect_child_t *ic;
while ((ic = list_head(&is->is_unique_child)) != NULL)
list_remove(&is->is_unique_child, ic);
is->is_unique_children = 0;
}
return (error);
}
/*
* This function is called when we have read all copies of the data and need
* to try to find a combination of copies that gives us the right checksum.
*
* If we pointed to any mirror vdevs, this effectively does the job of the
* mirror. The mirror vdev code can't do its own job because we don't know
* the checksum of each split segment individually.
*
* We have to try every unique combination of copies of split segments, until
* we find one that checksums correctly. Duplicate segment copies are first
* identified and later skipped during reconstruction. This optimization
* reduces the search space and ensures that of the remaining combinations
* at most one is correct.
*
* When the total number of combinations is small they can all be checked.
* For example, if we have 3 segments in the split, and each points to a
* 2-way mirror with unique copies, we will have the following pieces of data:
*
* | mirror child
* split | [0] [1]
* ======|=====================
* A | data_A_0 data_A_1
* B | data_B_0 data_B_1
* C | data_C_0 data_C_1
*
* We will try the following (mirror children)^(number of splits) (2^3=8)
* combinations, which is similar to bitwise-little-endian counting in
* binary. In general each "digit" corresponds to a split segment, and the
* base of each digit is is_children, which can be different for each
* digit.
*
* "low bit" "high bit"
* v v
* data_A_0 data_B_0 data_C_0
* data_A_1 data_B_0 data_C_0
* data_A_0 data_B_1 data_C_0
* data_A_1 data_B_1 data_C_0
* data_A_0 data_B_0 data_C_1
* data_A_1 data_B_0 data_C_1
* data_A_0 data_B_1 data_C_1
* data_A_1 data_B_1 data_C_1
*
* Note that the split segments may be on the same or different top-level
* vdevs. In either case, we may need to try lots of combinations (see
* zfs_reconstruct_indirect_combinations_max). This ensures that if a mirror
* has small silent errors on all of its children, we can still reconstruct
* the correct data, as long as those errors are at sufficiently-separated
* offsets (specifically, separated by the largest block size - default of
* 128KB, but up to 16MB).
*/
static void
vdev_indirect_reconstruct_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
boolean_t known_good = B_FALSE;
int error;
iv->iv_unique_combinations = 1;
iv->iv_attempts_max = UINT64_MAX;
if (zfs_reconstruct_indirect_combinations_max > 0)
iv->iv_attempts_max = zfs_reconstruct_indirect_combinations_max;
/*
* If zfs_reconstruct_indirect_damage_fraction is nonzero, roughly one in
* that many blocks is deliberately damaged, in order to validate
* reconstruction when there are split segments with damaged copies.
* known_good will be TRUE when reconstruction is known to be possible.
*/
if (zfs_reconstruct_indirect_damage_fraction != 0 &&
random_in_range(zfs_reconstruct_indirect_damage_fraction) == 0)
known_good = (vdev_indirect_splits_damage(iv, zio) == 0);
/*
* Determine the unique children for a split segment and add them
* to the is_unique_child list. By restricting reconstruction
* to these children, only unique combinations will be considered.
* This can vastly reduce the search space when there are a large
* number of indirect splits.
*/
for (indirect_split_t *is = list_head(&iv->iv_splits);
is != NULL; is = list_next(&iv->iv_splits, is)) {
is->is_unique_children = 0;
for (int i = 0; i < is->is_children; i++) {
indirect_child_t *ic_i = &is->is_child[i];
if (ic_i->ic_data == NULL ||
ic_i->ic_duplicate != NULL)
continue;
for (int j = i + 1; j < is->is_children; j++) {
indirect_child_t *ic_j = &is->is_child[j];
if (ic_j->ic_data == NULL ||
ic_j->ic_duplicate != NULL)
continue;
if (abd_cmp(ic_i->ic_data, ic_j->ic_data) == 0)
ic_j->ic_duplicate = ic_i;
}
is->is_unique_children++;
list_insert_tail(&is->is_unique_child, ic_i);
}
/* Reconstruction is impossible, no valid children */
EQUIV(list_is_empty(&is->is_unique_child),
is->is_unique_children == 0);
if (list_is_empty(&is->is_unique_child)) {
zio->io_error = EIO;
vdev_indirect_all_checksum_errors(zio);
zio_checksum_verified(zio);
return;
}
iv->iv_unique_combinations *= is->is_unique_children;
}
if (iv->iv_unique_combinations <= iv->iv_attempts_max)
error = vdev_indirect_splits_enumerate_all(iv, zio);
else
error = vdev_indirect_splits_enumerate_randomly(iv, zio);
if (error != 0) {
/* All attempted combinations failed. */
ASSERT3B(known_good, ==, B_FALSE);
zio->io_error = error;
vdev_indirect_all_checksum_errors(zio);
} else {
/*
* The checksum has been successfully validated. Issue
* repair I/Os to any copies of splits which don't match
* the validated version.
*/
ASSERT0(vdev_indirect_splits_checksum_validate(iv, zio));
vdev_indirect_repair(zio);
zio_checksum_verified(zio);
}
}
static void
vdev_indirect_io_done(zio_t *zio)
{
indirect_vsd_t *iv = zio->io_vsd;
if (iv->iv_reconstruct) {
/*
* We have read all copies of the data (e.g. from mirrors),
* either because this was a scrub/resilver, or because the
* one-copy read didn't checksum correctly.
*/
vdev_indirect_reconstruct_io_done(zio);
return;
}
if (!iv->iv_split_block) {
/*
* This was not a split block, so we passed the BP down,
* and the checksum was handled by the (one) child zio.
*/
return;
}
zio_bad_cksum_t zbc;
int ret = zio_checksum_error(zio, &zbc);
if (ret == 0) {
zio_checksum_verified(zio);
return;
}
/*
* The checksum didn't match. Read all copies of all splits, and
* then we will try to reconstruct. The next time
* vdev_indirect_io_done() is called, iv_reconstruct will be set.
*/
vdev_indirect_read_all(zio);
zio_vdev_io_redone(zio);
}
vdev_ops_t vdev_indirect_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_indirect_open,
.vdev_op_close = vdev_indirect_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_indirect_io_start,
.vdev_op_io_done = vdev_indirect_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = vdev_indirect_remap,
.vdev_op_xlate = NULL,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_INDIRECT, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* leaf vdev */
};
EXPORT_SYMBOL(spa_condense_fini);
EXPORT_SYMBOL(spa_start_indirect_condensing_thread);
EXPORT_SYMBOL(spa_condense_indirect_start_sync);
EXPORT_SYMBOL(spa_condense_init);
EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
EXPORT_SYMBOL(vdev_indirect_should_condense);
EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT, ZMOD_RW,
"Whether to attempt condensing indirect vdev mappings");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, INT, ZMOD_RW,
"Minimum obsolete percent of bytes in the mapping to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, min_mapping_bytes, ULONG, ZMOD_RW,
"Don't bother condensing if the mapping uses less than this amount of "
"memory");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, ULONG, ZMOD_RW,
"Minimum size obsolete spacemap to attempt condensing");
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms, INT, ZMOD_RW,
"Used by tests to ensure certain actions happen in the middle of a "
"condense. A maximum value of 1 should be sufficient.");
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max, INT, ZMOD_RW,
"Maximum number of combinations when reconstructing split segments");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_initialize.c b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
index e9156c32f384..6ffd0d618fdd 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_initialize.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_initialize.c
@@ -1,772 +1,774 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2019 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>
/*
* Value that is written to disk during initialization.
*/
#ifdef _ILP32
unsigned long zfs_initialize_value = 0xdeadbeefUL;
#else
unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL;
#endif
/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;
/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
unsigned long zfs_initialize_chunk_size = 1024 * 1024;
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing);
}
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
/*
* We pass in the guid instead of the vdev_t since the vdev may
* have been freed prior to the sync task being processed. This
* happens when a vdev is detached as we call spa_config_vdev_exit(),
* stop the initializing thread, schedule the sync task, and free
* the vdev. Later when the scheduled sync task is invoked, it would
* find that the vdev has been freed.
*/
uint64_t guid = *(uint64_t *)arg;
uint64_t txg = dmu_tx_get_txg(tx);
kmem_free(arg, sizeof (uint64_t));
vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
return;
uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
vd->vdev_initialize_offset[txg & TXG_MASK] = 0;
VERIFY(vd->vdev_leaf_zap != 0);
objset_t *mos = vd->vdev_spa->spa_meta_objset;
if (last_offset > 0) {
vd->vdev_initialize_last_offset = last_offset;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
sizeof (last_offset), 1, &last_offset, tx));
}
if (vd->vdev_initialize_action_time > 0) {
uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
1, &val, tx));
}
uint64_t initialize_state = vd->vdev_initialize_state;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
&initialize_state, tx));
}
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
spa_t *spa = vd->vdev_spa;
if (new_state == vd->vdev_initialize_state)
return;
/*
* Copy the vd's guid, this will be freed by the sync task.
*/
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/*
* If we're suspending, then preserve the original start time.
*/
if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
vd->vdev_initialize_action_time = gethrestime_sec();
}
vdev_initializing_state_t old_state = vd->vdev_initialize_state;
vd->vdev_initialize_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
guid, tx);
switch (new_state) {
case VDEV_INITIALIZE_ACTIVE:
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s activated", vd->vdev_path);
break;
case VDEV_INITIALIZE_SUSPENDED:
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s suspended", vd->vdev_path);
break;
case VDEV_INITIALIZE_CANCELED:
if (old_state == VDEV_INITIALIZE_ACTIVE ||
old_state == VDEV_INITIALIZE_SUSPENDED)
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s canceled", vd->vdev_path);
break;
case VDEV_INITIALIZE_COMPLETE:
spa_history_log_internal(spa, "initialize", tx,
"vdev=%s complete", vd->vdev_path);
break;
default:
panic("invalid state %llu", (unsigned long long)new_state);
}
dmu_tx_commit(tx);
if (new_state != VDEV_INITIALIZE_ACTIVE)
spa_notify_waiters(spa);
}
static void
vdev_initialize_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_initialize_io_lock);
if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
/*
* The I/O failed because the vdev was unavailable; roll the
* last offset back. (This works because spa_sync waits on
* spa_txg_zio before it runs sync tasks.)
*/
uint64_t *off =
&vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
*off = MIN(*off, zio->io_offset);
} else {
/*
* Since initializing is best-effort, we ignore I/O errors and
* rely on vdev_probe to determine if the errors are more
* critical.
*/
if (zio->io_error != 0)
vd->vdev_stat.vs_initialize_errors++;
vd->vdev_initialize_bytes_done += zio->io_orig_size;
}
ASSERT3U(vd->vdev_initialize_inflight, >, 0);
vd->vdev_initialize_inflight--;
cv_broadcast(&vd->vdev_initialize_io_cv);
mutex_exit(&vd->vdev_initialize_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
spa_t *spa = vd->vdev_spa;
/* Limit inflight initializing I/Os */
mutex_enter(&vd->vdev_initialize_io_lock);
while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
cv_wait(&vd->vdev_initialize_io_cv,
&vd->vdev_initialize_io_lock);
}
vd->vdev_initialize_inflight++;
mutex_exit(&vd->vdev_initialize_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
mutex_enter(&vd->vdev_initialize_lock);
if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/* This is the first write of this txg. */
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_initialize_zap_update_sync, guid, tx);
}
/*
* We know the vdev struct will still be around since all
* consumers of vdev_free must stop the initialization first.
*/
if (vdev_initialize_should_stop(vd)) {
mutex_enter(&vd->vdev_initialize_io_lock);
ASSERT3U(vd->vdev_initialize_inflight, >, 0);
vd->vdev_initialize_inflight--;
mutex_exit(&vd->vdev_initialize_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
mutex_exit(&vd->vdev_initialize_lock);
dmu_tx_commit(tx);
return (SET_ERROR(EINTR));
}
mutex_exit(&vd->vdev_initialize_lock);
vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
/* vdev_initialize_cb releases SCL_STATE_ALL */
dmu_tx_commit(tx);
return (0);
}
/*
* Callback to fill each ABD chunk with zfs_initialize_value. len must be
* divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
* allocation will guarantee these for us.
*/
-/* ARGSUSED */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
+ (void) unused;
+
ASSERT0(len % sizeof (uint64_t));
#ifdef _ILP32
for (uint64_t i = 0; i < len; i += sizeof (uint32_t)) {
*(uint32_t *)((char *)(buf) + i) = zfs_initialize_value;
}
#else
for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
}
#endif
return (0);
}
static abd_t *
vdev_initialize_block_alloc(void)
{
/* Allocate ABD for filler data */
abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);
ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
vdev_initialize_block_fill, NULL);
return (data);
}
static void
vdev_initialize_block_free(abd_t *data)
{
abd_free(data);
}
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
range_tree_t *rt = vd->vdev_initialize_tree;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t where;
for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
rs = zfs_btree_next(bt, &where, &where)) {
uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);
/* Split range into legally-sized physical chunks */
uint64_t writes_required =
((size - 1) / zfs_initialize_chunk_size) + 1;
for (uint64_t w = 0; w < writes_required; w++) {
int error;
error = vdev_initialize_write(vd,
VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
(w * zfs_initialize_chunk_size),
MIN(size - (w * zfs_initialize_chunk_size),
zfs_initialize_chunk_size), data);
if (error != 0)
return (error);
}
}
return (0);
}
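Editor's aside for reviewers: the loop above splits each free segment into zfs_initialize_chunk_size pieces with a plain ceiling division. A minimal standalone sketch of just that arithmetic (illustrative only; the 2.5 MiB segment is a made-up value, not something read from a pool):

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: split one free segment into chunk-sized
 * writes using the same ceiling division as vdev_initialize_ranges().
 */
int
main(void)
{
	uint64_t chunk = 1024 * 1024;		/* zfs_initialize_chunk_size default */
	uint64_t start = 0, size = 2621440;	/* hypothetical 2.5 MiB free segment */
	uint64_t writes_required = ((size - 1) / chunk) + 1;

	for (uint64_t w = 0; w < writes_required; w++) {
		uint64_t len = (size - (w * chunk) < chunk) ?
		    size - (w * chunk) : chunk;
		printf("write %llu: offset %llu, length %llu\n",
		    (unsigned long long)w,
		    (unsigned long long)(start + w * chunk),
		    (unsigned long long)len);
	}
	return (0);
}

For the example values this prints two full 1 MiB writes followed by a 512 KiB tail, which matches the writes_required computation above.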
static void
vdev_initialize_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
if (physical_rs->rs_end > *last_rs_end)
*last_rs_end = physical_rs->rs_end;
}
static void
vdev_initialize_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
vd->vdev_initialize_bytes_est += size;
if (vd->vdev_initialize_last_offset > physical_rs->rs_end) {
vd->vdev_initialize_bytes_done += size;
} else if (vd->vdev_initialize_last_offset > physical_rs->rs_start &&
vd->vdev_initialize_last_offset < physical_rs->rs_end) {
vd->vdev_initialize_bytes_done +=
vd->vdev_initialize_last_offset - physical_rs->rs_start;
}
}
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
vd->vdev_initialize_bytes_est = 0;
vd->vdev_initialize_bytes_done = 0;
for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
mutex_enter(&msp->ms_lock);
uint64_t ms_free = (msp->ms_size -
metaslab_allocated_space(msp)) /
vdev_get_ndisks(vd->vdev_top);
/*
* Convert the metaslab range to a physical range
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
/* Metaslab space after this offset has not been initialized */
vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
vd->vdev_initialize_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/* Metaslab space before this offset has been initialized */
uint64_t last_rs_end = physical_rs.rs_end;
if (!vdev_xlate_is_empty(&remain_rs)) {
vdev_xlate_walk(vd, &remain_rs,
vdev_initialize_xlate_last_rs_end, &last_rs_end);
}
if (vd->vdev_initialize_last_offset > last_rs_end) {
vd->vdev_initialize_bytes_done += ms_free;
vd->vdev_initialize_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If we get here, we're in the middle of initializing this
* metaslab. Load it and walk the free tree for more accurate
* progress estimation.
*/
VERIFY0(metaslab_load(msp));
zfs_btree_index_t where;
range_tree_t *rt = msp->ms_allocatable;
for (range_seg_t *rs =
zfs_btree_first(&rt->rt_root, &where); rs;
rs = zfs_btree_next(&rt->rt_root, &where,
&where)) {
logical_rs.rs_start = rs_get_start(rs, rt);
logical_rs.rs_end = rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs,
vdev_initialize_xlate_progress, vd);
}
mutex_exit(&msp->ms_lock);
}
}
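The progress accounting above reduces to classifying each translated physical segment against vdev_initialize_last_offset: fully done, cut mid-segment, or not reached. A minimal hedged sketch of that classification (the helper name and the byte values are hypothetical, purely for illustration):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: how many bytes of a physical segment count as done
 * relative to last_offset, mirroring vdev_initialize_xlate_progress(). */
static uint64_t
segment_bytes_done(uint64_t last_offset, uint64_t rs_start, uint64_t rs_end)
{
	if (last_offset > rs_end)				/* fully initialized */
		return (rs_end - rs_start);
	if (last_offset > rs_start && last_offset < rs_end)	/* cut mid-segment */
		return (last_offset - rs_start);
	return (0);						/* not reached yet */
}

int
main(void)
{
	uint64_t last = 1536;	/* hypothetical vdev_initialize_last_offset */

	printf("[0, 1024):    %llu bytes done\n",
	    (unsigned long long)segment_bytes_done(last, 0, 1024));
	printf("[1024, 2048): %llu bytes done\n",
	    (unsigned long long)segment_bytes_done(last, 1024, 2048));
	printf("[2048, 4096): %llu bytes done\n",
	    (unsigned long long)segment_bytes_done(last, 2048, 4096));
	return (0);
}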
static int
vdev_initialize_load(vdev_t *vd)
{
int err = 0;
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
sizeof (vd->vdev_initialize_last_offset), 1,
&vd->vdev_initialize_last_offset);
if (err == ENOENT) {
vd->vdev_initialize_last_offset = 0;
err = 0;
}
}
vdev_initialize_calculate_progress(vd);
return (err);
}
static void
vdev_initialize_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
vdev_t *vd = arg;
/* Only add segments that we have not visited yet */
if (physical_rs->rs_end <= vd->vdev_initialize_last_offset)
return;
/* Pick up where we left off mid-range. */
if (vd->vdev_initialize_last_offset > physical_rs->rs_start) {
zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
"(%llu, %llu)", vd->vdev_path,
(u_longlong_t)physical_rs->rs_start,
(u_longlong_t)physical_rs->rs_end,
(u_longlong_t)vd->vdev_initialize_last_offset,
(u_longlong_t)physical_rs->rs_end);
ASSERT3U(physical_rs->rs_end, >,
vd->vdev_initialize_last_offset);
physical_rs->rs_start = vd->vdev_initialize_last_offset;
}
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start);
}
/*
* Convert the logical range into a physical range and add it to our
* range tree.
*/
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_xlate_walk(vd, &logical_rs, vdev_initialize_xlate_range_add, arg);
}
static void
vdev_initialize_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
int error = 0;
uint64_t ms_count = 0;
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_initialize_last_offset = 0;
VERIFY0(vdev_initialize_load(vd));
abd_t *deadbeef = vdev_initialize_block_alloc();
vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
for (uint64_t i = 0; !vd->vdev_detached &&
i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
boolean_t unload_when_done = B_FALSE;
/*
* If we've expanded the top-level vdev or it's our
* first pass, calculate our progress.
*/
if (vd->vdev_top->vdev_ms_count != ms_count) {
vdev_initialize_calculate_progress(vd);
ms_count = vd->vdev_top->vdev_ms_count;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
if (!msp->ms_loaded && !msp->ms_loading)
unload_when_done = B_TRUE;
VERIFY0(metaslab_load(msp));
range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
vd);
mutex_exit(&msp->ms_lock);
error = vdev_initialize_ranges(vd, deadbeef);
metaslab_enable(msp, B_TRUE, unload_when_done);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_enter(&vd->vdev_initialize_io_lock);
while (vd->vdev_initialize_inflight > 0) {
cv_wait(&vd->vdev_initialize_io_cv,
&vd->vdev_initialize_io_lock);
}
mutex_exit(&vd->vdev_initialize_io_lock);
range_tree_destroy(vd->vdev_initialize_tree);
vdev_initialize_block_free(deadbeef);
vd->vdev_initialize_tree = NULL;
mutex_enter(&vd->vdev_initialize_lock);
if (!vd->vdev_initialize_exit_wanted) {
if (vdev_writeable(vd)) {
vdev_initialize_change_state(vd,
VDEV_INITIALIZE_COMPLETE);
} else if (vd->vdev_faulted) {
vdev_initialize_change_state(vd,
VDEV_INITIALIZE_CANCELED);
}
}
ASSERT(vd->vdev_initialize_thread != NULL ||
vd->vdev_initialize_inflight == 0);
/*
* Drop the vdev_initialize_lock while we sync out the
* txg since it's possible that a device might be trying to
* come online and must check to see if it needs to restart an
* initialization. That thread will be holding the spa_config_lock
* which would prevent the txg_wait_synced from completing.
*/
mutex_exit(&vd->vdev_initialize_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&vd->vdev_initialize_lock);
vd->vdev_initialize_thread = NULL;
cv_broadcast(&vd->vdev_initialize_cv);
mutex_exit(&vd->vdev_initialize_lock);
thread_exit();
}
/*
* Initiates initialization of a device. Caller must hold vdev_initialize_lock.
* Device must be a leaf and not already be initializing.
*/
void
vdev_initialize(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_initialize_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
vd->vdev_initialize_thread = thread_create(NULL, 0,
vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
/*
* Wait for the initialize thread to be terminated (cancelled or stopped).
*/
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
while (vd->vdev_initialize_thread != NULL)
cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
vd->vdev_initialize_exit_wanted = B_FALSE;
}
/*
* Wait for the vdev initialize threads that were listed to cleanly exit.
*/
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
+ (void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
while ((vd = list_remove_head(vd_list)) != NULL) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop_wait_impl(vd);
mutex_exit(&vd->vdev_initialize_lock);
}
}
/*
* Stop initializing a device, with the resultant initializing state being
* tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
* a list_t is provided the stopping vdev is inserted in to the list. Callers
* are then required to call vdev_initialize_stop_wait() to block for all the
* initialization threads to exit. The caller must hold vdev_initialize_lock
* and must not be writing to the spa config, as the initializing thread may
* try to enter the config as a reader before exiting.
*/
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
list_t *vd_list)
{
ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
/*
* Allow cancel requests to proceed even if the initialize thread
* has stopped.
*/
if (vd->vdev_initialize_thread == NULL &&
tgt_state != VDEV_INITIALIZE_CANCELED) {
return;
}
vdev_initialize_change_state(vd, tgt_state);
vd->vdev_initialize_exit_wanted = B_TRUE;
if (vd_list == NULL) {
vdev_initialize_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock));
list_insert_tail(vd_list, vd);
}
}
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
list_t *vd_list)
{
if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
mutex_enter(&vd->vdev_initialize_lock);
vdev_initialize_stop(vd, tgt_state, vd_list);
mutex_exit(&vd->vdev_initialize_lock);
return;
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
vd_list);
}
}
/*
* Convenience function to stop initializing a vdev tree and set all
* initialize thread pointers to NULL.
*/
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
spa_t *spa = vd->vdev_spa;
list_t vd_list;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
vdev_initialize_stop_wait(spa, &vd_list);
if (vd->vdev_spa->spa_sync_on) {
/* Make sure that our state has been synced to disk */
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
}
list_destroy(&vd_list);
}
void
vdev_initialize_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_leaf_zap != 0) {
mutex_enter(&vd->vdev_initialize_lock);
uint64_t initialize_state = VDEV_INITIALIZE_NONE;
int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
sizeof (initialize_state), 1, &initialize_state);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_initialize_state = initialize_state;
uint64_t timestamp = 0;
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
sizeof (timestamp), 1, &timestamp);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_initialize_action_time = timestamp;
if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
vd->vdev_offline) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_initialize_load(vd));
} else if (vd->vdev_initialize_state ==
VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
!vd->vdev_top->vdev_removing &&
vd->vdev_initialize_thread == NULL) {
vdev_initialize(vd);
}
mutex_exit(&vd->vdev_initialize_lock);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_initialize_restart(vd->vdev_child[i]);
}
}
EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, ULONG, ZMOD_RW,
"Value written during zpool initialize");
ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, ULONG, ZMOD_RW,
"Size in bytes of writes by zpool initialize");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_mirror.c b/sys/contrib/openzfs/module/zfs/vdev_mirror.c
index 5eb331046953..45b744b2ec89 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_mirror.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_mirror.c
@@ -1,980 +1,982 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
/*
* Vdev mirror kstats
*/
static kstat_t *mirror_ksp = NULL;
typedef struct mirror_stats {
kstat_named_t vdev_mirror_stat_rotating_linear;
kstat_named_t vdev_mirror_stat_rotating_offset;
kstat_named_t vdev_mirror_stat_rotating_seek;
kstat_named_t vdev_mirror_stat_non_rotating_linear;
kstat_named_t vdev_mirror_stat_non_rotating_seek;
kstat_named_t vdev_mirror_stat_preferred_found;
kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;
static mirror_stats_t mirror_stats = {
/* New I/O follows directly the last I/O */
{ "rotating_linear", KSTAT_DATA_UINT64 },
/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
{ "rotating_offset", KSTAT_DATA_UINT64 },
/* New I/O requires random seek */
{ "rotating_seek", KSTAT_DATA_UINT64 },
/* New I/O follows directly the last I/O (nonrot) */
{ "non_rotating_linear", KSTAT_DATA_UINT64 },
/* New I/O requires random seek (nonrot) */
{ "non_rotating_seek", KSTAT_DATA_UINT64 },
/* Preferred child vdev found */
{ "preferred_found", KSTAT_DATA_UINT64 },
/* Preferred child vdev not found or equal load */
{ "preferred_not_found", KSTAT_DATA_UINT64 },
};
#define MIRROR_STAT(stat) (mirror_stats.stat.value.ui64)
#define MIRROR_INCR(stat, val) atomic_add_64(&MIRROR_STAT(stat), val)
#define MIRROR_BUMP(stat) MIRROR_INCR(stat, 1)
void
vdev_mirror_stat_init(void)
{
mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
"misc", KSTAT_TYPE_NAMED,
sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (mirror_ksp != NULL) {
mirror_ksp->ks_data = &mirror_stats;
kstat_install(mirror_ksp);
}
}
void
vdev_mirror_stat_fini(void)
{
if (mirror_ksp != NULL) {
kstat_delete(mirror_ksp);
mirror_ksp = NULL;
}
}
/*
* Virtual device vector for mirroring.
*/
typedef struct mirror_child {
vdev_t *mc_vd;
uint64_t mc_offset;
int mc_error;
int mc_load;
uint8_t mc_tried;
uint8_t mc_skipped;
uint8_t mc_speculative;
uint8_t mc_rebuilding;
} mirror_child_t;
typedef struct mirror_map {
int *mm_preferred;
int mm_preferred_cnt;
int mm_children;
boolean_t mm_resilvering;
boolean_t mm_rebuilding;
boolean_t mm_root;
mirror_child_t mm_child[];
} mirror_map_t;
static int vdev_mirror_shift = 21;
/*
* The load configuration settings below are tuned by default for
* the case where all devices are of the same rotational type.
*
* If there is a mixture of rotating and non-rotating media, setting
* zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
* as it will direct more reads to the non-rotating vdevs which are more likely
* to have a higher performance.
*/
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;
/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;
static inline size_t
vdev_mirror_map_size(int children)
{
return (offsetof(mirror_map_t, mm_child[children]) +
sizeof (int) * children);
}
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
mirror_map_t *mm;
mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
mm->mm_children = children;
mm->mm_resilvering = resilvering;
mm->mm_root = root;
mm->mm_preferred = (int *)((uintptr_t)mm +
offsetof(mirror_map_t, mm_child[children]));
return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
.vsd_free = vdev_mirror_map_free,
};
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
uint64_t last_offset;
int64_t offset_diff;
int load;
/* All DVAs have equal weight at the root. */
if (mm->mm_root)
return (INT_MAX);
/*
* We don't return INT_MAX if the device is resilvering i.e.
* vdev_resilver_txg != 0, because in testing overall performance was
* slightly worse when resilvering than when not.
*/
/* Fix zio_offset for leaf vdevs */
if (vd->vdev_ops->vdev_op_leaf)
zio_offset += VDEV_LABEL_START_SIZE;
/* Standard load based on pending queue length. */
load = vdev_queue_length(vd);
last_offset = vdev_queue_last_offset(vd);
if (vd->vdev_nonrot) {
/* Non-rotating media. */
if (last_offset == zio_offset) {
MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
return (load + zfs_vdev_mirror_non_rotating_inc);
}
/*
* Apply a seek penalty even for non-rotating devices as
* sequential I/O's can be aggregated into fewer operations on
* the device, thus avoiding unnecessary per-command overhead
* and boosting performance.
*/
MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
return (load + zfs_vdev_mirror_non_rotating_seek_inc);
}
/* Rotating media I/O's which directly follow the last I/O. */
if (last_offset == zio_offset) {
MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
return (load + zfs_vdev_mirror_rotating_inc);
}
/*
* Apply half the seek increment to I/O's within seek offset
* of the last I/O issued to this vdev as they should incur less
* of a seek increment.
*/
offset_diff = (int64_t)(last_offset - zio_offset);
if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
}
/* Apply the full seek increment to all other I/O's. */
MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
return (load + zfs_vdev_mirror_rotating_seek_inc);
}
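For readers skimming the review, the load heuristic above boils down to the pending queue length plus a media-dependent seek penalty. Here is a standalone sketch of the same arithmetic using the default tunables; the helper name and the plain parameters standing in for vdev_queue_length()/vdev_queue_last_offset() are the editor's, purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Default tunables, copied from the zfs_vdev_mirror_* values above. */
static const int rotating_inc = 0;
static const int rotating_seek_inc = 5;
static const int64_t rotating_seek_offset = 1 * 1024 * 1024;
static const int non_rotating_inc = 0;
static const int non_rotating_seek_inc = 1;

/* Illustrative stand-in for vdev_mirror_load(): queue_len and last_offset
 * replace the vdev_queue_length()/vdev_queue_last_offset() lookups. */
static int
mirror_load(int nonrot, uint64_t last_offset, uint64_t zio_offset, int queue_len)
{
	int load = queue_len;

	if (nonrot) {
		if (last_offset == zio_offset)
			return (load + non_rotating_inc);
		return (load + non_rotating_seek_inc);
	}
	if (last_offset == zio_offset)
		return (load + rotating_inc);

	int64_t diff = (int64_t)(last_offset - zio_offset);
	if (diff < 0)
		diff = -diff;
	if (diff < rotating_seek_offset)
		return (load + rotating_seek_inc / 2);
	return (load + rotating_seek_inc);
}

int
main(void)
{
	printf("rotating, linear:     %d\n", mirror_load(0, 1 << 20, 1 << 20, 3));
	printf("rotating, short seek: %d\n", mirror_load(0, 1 << 20, (1 << 20) | 4096, 3));
	printf("rotating, long seek:  %d\n", mirror_load(0, 1 << 20, 64ULL << 20, 3));
	printf("non-rotating, seek:   %d\n", mirror_load(1, 1 << 20, 64ULL << 20, 3));
	return (0);
}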
static boolean_t
vdev_mirror_rebuilding(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (vdev_mirror_rebuilding(vd->vdev_child[i])) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Avoid inlining the function to keep vdev_mirror_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
mirror_map_t *mm = NULL;
mirror_child_t *mc;
vdev_t *vd = zio->io_vd;
int c;
if (vd == NULL) {
dva_t *dva = zio->io_bp->blk_dva;
spa_t *spa = zio->io_spa;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dva_t dva_copy[SPA_DVAS_PER_BP];
/*
* The sequential scrub code sorts and issues all DVAs
* of a bp separately. Each of these IOs includes all
* original DVA copies so that repairs can be performed
* in the event of an error, but we only actually want
* to check the first DVA since the others will be
* checked by their respective sorted IOs. Only if we
* hit an error will we try all DVAs upon retrying.
*
* Note: This check is safe even if the user switches
* from a legacy scrub to a sequential one in the middle
* of processing, since scn_is_sorted isn't updated until
* all outstanding IOs from the previous scrub pass
* complete.
*/
if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
dsl_scan_scrubbing(spa->spa_dsl_pool) &&
scn->scn_is_sorted) {
c = 1;
} else {
c = BP_GET_NDVAS(zio->io_bp);
}
/*
* If the pool cannot be written to, then infer that some
* DVAs might be invalid or point to vdevs that do not exist.
* We skip them.
*/
if (!spa_writeable(spa)) {
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
int j = 0;
for (int i = 0; i < c; i++) {
if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
dva_copy[j++] = dva[i];
}
if (j == 0) {
zio->io_vsd = NULL;
zio->io_error = ENXIO;
return (NULL);
}
if (j < c) {
dva = dva_copy;
c = j;
}
}
mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
if (mc->mc_vd == NULL) {
kmem_free(mm, vdev_mirror_map_size(
mm->mm_children));
zio->io_vsd = NULL;
zio->io_error = ENXIO;
return (NULL);
}
}
} else {
/*
* If we are resilvering, then we should handle scrub reads
* differently; we shouldn't issue them to the resilvering
* device because it might not have those blocks.
*
* We are resilvering iff:
* 1) We are a replacing vdev (ie our name is "replacing-1" or
* "spare-1" or something like that), and
* 2) The pool is currently being resilvered.
*
* We cannot simply check vd->vdev_resilver_txg, because it's
* not set in this path.
*
* Nor can we just check our vdev_ops; there are cases (such as
* when a user types "zpool replace pool odev spare_dev" and
* spare_dev is in the spare list, or when a spare device is
* automatically used to replace a DEGRADED device) when
* resilvering is complete but both the original vdev and the
* spare vdev remain in the pool. That behavior is intentional.
* It helps implement the policy that a spare should be
* automatically removed from the pool after the user replaces
* the device that originally failed.
*
* If a spa load is in progress, then spa_dsl_pool may be
* uninitialized. But we shouldn't be resilvering during a spa
* load anyway.
*/
boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops) &&
spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
B_FALSE);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
mc->mc_vd = vd->vdev_child[c];
mc->mc_offset = zio->io_offset;
if (vdev_mirror_rebuilding(mc->mc_vd))
mm->mm_rebuilding = mc->mc_rebuilding = B_TRUE;
}
}
return (mm);
}
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
int numerrors = 0;
int lasterror = 0;
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
*physical_ashift = MAX(*physical_ashift,
cvd->vdev_physical_ashift);
}
if (numerrors == vd->vdev_children) {
if (vdev_children_are_offline(vd))
vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
else
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_mirror_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
mirror_child_t *mc = zio->io_private;
mc->mc_error = zio->io_error;
mc->mc_tried = 1;
mc->mc_skipped = 0;
}
static void
vdev_mirror_scrub_done(zio_t *zio)
{
mirror_child_t *mc = zio->io_private;
if (zio->io_error == 0) {
zio_t *pio;
zio_link_t *zl = NULL;
mutex_enter(&zio->io_lock);
while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
mutex_enter(&pio->io_lock);
ASSERT3U(zio->io_size, >=, pio->io_size);
abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
mutex_exit(&pio->io_lock);
}
mutex_exit(&zio->io_lock);
}
abd_free(zio->io_abd);
mc->mc_error = zio->io_error;
mc->mc_tried = 1;
mc->mc_skipped = 0;
}
/*
* Check the other, lower-index DVAs to see if they're on the same
* vdev as the child we picked. If they are, use them since they
* are likely to have been allocated from the primary metaslab in
* use at the time, and hence are more likely to have locality with
* single-copy data.
*/
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
dva_t *dva = zio->io_bp->blk_dva;
mirror_map_t *mm = zio->io_vsd;
int preferred;
int c;
preferred = mm->mm_preferred[p];
for (p--; p >= 0; p--) {
c = mm->mm_preferred[p];
if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
preferred = c;
}
return (preferred);
}
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
int p;
if (mm->mm_root) {
p = random_in_range(mm->mm_preferred_cnt);
return (vdev_mirror_dva_select(zio, p));
}
/*
* To ensure we don't always favour the first matching vdev,
* which could lead to wear leveling issues on SSDs, we
* use the I/O offset as a pseudo-random seed into the vdevs
* which have the lowest load.
*/
p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
return (mm->mm_preferred[p]);
}
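The pseudo-random pick above is just a modulo over the equal-lowest-load candidates, keyed by which 2 MiB region (1 << vdev_mirror_shift, with the default shift of 21) the I/O offset falls in. A tiny sketch under those assumptions; the helper and its arguments are the editor's, for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for vdev_mirror_preferred_child_randomize():
 * pick among the lowest-load candidates using the I/O offset. */
static int
pick_preferred(uint64_t io_offset, const int *preferred, int preferred_cnt)
{
	int p = (int)((io_offset >> 21) % (uint64_t)preferred_cnt);
	return (preferred[p]);
}

int
main(void)
{
	int preferred[2] = { 0, 2 };	/* hypothetical children with equal, lowest load */

	/* Offsets in different 2 MiB regions land on different children. */
	printf("offset 0x100000 -> child %d\n", pick_preferred(0x100000, preferred, 2));
	printf("offset 0x300000 -> child %d\n", pick_preferred(0x300000, preferred, 2));
	return (0);
}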
static boolean_t
vdev_mirror_child_readable(mirror_child_t *mc)
{
vdev_t *vd = mc->mc_vd;
if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
return (vdev_draid_readable(vd, mc->mc_offset));
else
return (vdev_readable(vd));
}
static boolean_t
vdev_mirror_child_missing(mirror_child_t *mc, uint64_t txg, uint64_t size)
{
vdev_t *vd = mc->mc_vd;
if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
return (vdev_draid_missing(vd, mc->mc_offset, txg, size));
else
return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}
/*
* Try to find a vdev whose DTL doesn't contain the block we want to read
* preferring vdevs based on determined load. If we can't, try the read on
* any vdev we haven't already tried.
*
* Distributed spares are an exception to the above load rule. They are
* always preferred in order to detect gaps in the distributed spare which
* are created when another disk in the dRAID fails. In order to restore
* redundancy those gaps must be read to trigger the required repair IO.
*/
static int
vdev_mirror_child_select(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
uint64_t txg = zio->io_txg;
int c, lowest_load;
ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
lowest_load = INT_MAX;
mm->mm_preferred_cnt = 0;
for (c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc;
mc = &mm->mm_child[c];
if (mc->mc_tried || mc->mc_skipped)
continue;
if (mc->mc_vd == NULL ||
!vdev_mirror_child_readable(mc)) {
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1; /* don't even try */
mc->mc_skipped = 1;
continue;
}
if (vdev_mirror_child_missing(mc, txg, 1)) {
mc->mc_error = SET_ERROR(ESTALE);
mc->mc_skipped = 1;
mc->mc_speculative = 1;
continue;
}
if (mc->mc_vd->vdev_ops == &vdev_draid_spare_ops) {
mm->mm_preferred[0] = c;
mm->mm_preferred_cnt = 1;
break;
}
mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
if (mc->mc_load > lowest_load)
continue;
if (mc->mc_load < lowest_load) {
lowest_load = mc->mc_load;
mm->mm_preferred_cnt = 0;
}
mm->mm_preferred[mm->mm_preferred_cnt] = c;
mm->mm_preferred_cnt++;
}
if (mm->mm_preferred_cnt == 1) {
MIRROR_BUMP(vdev_mirror_stat_preferred_found);
return (mm->mm_preferred[0]);
}
if (mm->mm_preferred_cnt > 1) {
MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
return (vdev_mirror_preferred_child_randomize(zio));
}
/*
* Every device is either missing or has this txg in its DTL.
* Look for any child we haven't already tried before giving up.
*/
for (c = 0; c < mm->mm_children; c++) {
if (!mm->mm_child[c].mc_tried)
return (c);
}
/*
* Every child failed. There's no place left to look.
*/
return (-1);
}
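Stripped of the DTL, readability and dRAID-spare checks, the selection loop above keeps a running minimum load and restarts the candidate list whenever a strictly lower load appears. A hedged standalone sketch with made-up per-child loads:

#include <stdio.h>
#include <limits.h>

/* Illustrative only: collect the lowest-load candidates the way
 * vdev_mirror_child_select() does, ignoring the skip conditions. */
int
main(void)
{
	int load[4] = { 7, 3, 3, 9 };	/* hypothetical per-child loads */
	int preferred[4];
	int preferred_cnt = 0, lowest_load = INT_MAX;

	for (int c = 0; c < 4; c++) {
		if (load[c] > lowest_load)
			continue;
		if (load[c] < lowest_load) {
			lowest_load = load[c];
			preferred_cnt = 0;
		}
		preferred[preferred_cnt++] = c;
	}
	printf("lowest load %d, %d candidate(s):", lowest_load, preferred_cnt);
	for (int i = 0; i < preferred_cnt; i++)
		printf(" child %d", preferred[i]);
	printf("\n");
	return (0);
}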
static void
vdev_mirror_io_start(zio_t *zio)
{
mirror_map_t *mm;
mirror_child_t *mc;
int c, children;
mm = vdev_mirror_map_init(zio);
zio->io_vsd = mm;
zio->io_vsd_ops = &vdev_mirror_vsd_ops;
if (mm == NULL) {
ASSERT(!spa_trust_config(zio->io_spa));
ASSERT(zio->io_type == ZIO_TYPE_READ);
zio_execute(zio);
return;
}
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_bp != NULL &&
(zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
/*
* For scrubbing reads (if we can verify the
* checksum here, as indicated by io_bp being
* non-NULL) we need to allocate a read buffer for
* each child and issue reads to all children. If
* any child succeeds, it will copy its data into
* zio->io_data in vdev_mirror_scrub_done.
*/
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
/* Don't issue ZIOs to offline children */
if (!vdev_mirror_child_readable(mc)) {
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1;
mc->mc_skipped = 1;
continue;
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset,
abd_alloc_sametype(zio->io_abd,
zio->io_size), zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_mirror_scrub_done, mc));
}
zio_execute(zio);
return;
}
/*
* For normal reads just pick one child.
*/
c = vdev_mirror_child_select(zio);
children = (c >= 0);
} else {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
/*
* Writes go to all children.
*/
c = 0;
children = mm->mm_children;
}
while (children--) {
mc = &mm->mm_child[c];
c++;
/*
* When sequentially resilvering only issue write repair
* IOs to the vdev which is being rebuilt since performance
* is limited by the slowest child. This is an issue for
* faster replacement devices such as distributed spares.
*/
if ((zio->io_priority == ZIO_PRIORITY_REBUILD) &&
(zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SCRUB) &&
mm->mm_rebuilding && !mc->mc_rebuilding) {
continue;
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_mirror_child_done, mc));
}
zio_execute(zio);
}
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
int error[2] = { 0, 0 };
for (int c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc = &mm->mm_child[c];
int s = mc->mc_speculative;
error[s] = zio_worst_error(error[s], mc->mc_error);
}
return (error[0] ? error[0] : error[1]);
}
static void
vdev_mirror_io_done(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
mirror_child_t *mc;
int c;
int good_copies = 0;
int unexpected_errors = 0;
if (mm == NULL)
return;
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
if (mc->mc_error) {
if (!mc->mc_skipped)
unexpected_errors++;
} else if (mc->mc_tried) {
good_copies++;
}
}
if (zio->io_type == ZIO_TYPE_WRITE) {
/*
* XXX -- for now, treat partial writes as success.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
/* XXPOLICY */
if (good_copies != mm->mm_children) {
/*
* Always require at least one good copy.
*
* For ditto blocks (io_vd == NULL), require
* all copies to be good.
*
* XXX -- for replacing vdevs, there's no great answer.
* If the old device is really dead, we may not even
* be able to access it -- so we only want to
* require good writes to the new device. But if
* the new device turns out to be flaky, we want
* to be able to detach it -- which requires all
* writes to the old device to have succeeded.
*/
if (good_copies == 0 || zio->io_vd == NULL)
zio->io_error = vdev_mirror_worst_error(mm);
}
return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ);
/*
* If we don't have a good copy yet, keep trying other children.
*/
/* XXPOLICY */
if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
ASSERT(c >= 0 && c < mm->mm_children);
mc = &mm->mm_child[c];
zio_vdev_io_redone(zio);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
ZIO_TYPE_READ, zio->io_priority, 0,
vdev_mirror_child_done, mc));
return;
}
/* XXPOLICY */
if (good_copies == 0) {
zio->io_error = vdev_mirror_worst_error(mm);
ASSERT(zio->io_error != 0);
}
if (good_copies && spa_writeable(zio->io_spa) &&
(unexpected_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER) ||
((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (c = 0; c < mm->mm_children; c++) {
/*
* Don't rewrite known good children.
* Not only is it unnecessary, it could
* actually be harmful: if the system lost
* power while rewriting the only good copy,
* there would be no good copies left!
*/
mc = &mm->mm_child[c];
if (mc->mc_error == 0) {
vdev_ops_t *ops = mc->mc_vd->vdev_ops;
if (mc->mc_tried)
continue;
/*
* We didn't try this child. We need to
* repair it if:
* 1. it's a scrub (in which case we have
* tried everything that was healthy)
* - or -
* 2. it's an indirect or distributed spare
* vdev (in which case it could point to any
* other vdev, which might have a bad DTL)
* - or -
* 3. the DTL indicates that this data is
* missing from this vdev
*/
if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
ops != &vdev_indirect_ops &&
ops != &vdev_draid_spare_ops &&
!vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
zio->io_txg, 1))
continue;
mc->mc_error = SET_ERROR(ESTALE);
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset,
zio->io_abd, zio->io_size, ZIO_TYPE_WRITE,
zio->io_priority == ZIO_PRIORITY_REBUILD ?
ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
if (faulted == vd->vdev_children) {
if (vdev_children_are_offline(vd)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
VDEV_AUX_CHILDREN_OFFLINE);
} else {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
}
} else if (degraded + faulted != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
} else {
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
}
/*
* Return the maximum asize for a rebuild zio in the provided range.
*/
static uint64_t
vdev_mirror_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t max_segment)
{
+ (void) start;
+
uint64_t psize = MIN(P2ROUNDUP(max_segment, 1 << vd->vdev_ashift),
SPA_MAXBLOCKSIZE);
return (MIN(asize, vdev_psize_to_asize(vd, psize)));
}
vdev_ops_t vdev_mirror_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_mirror_open,
.vdev_op_close = vdev_mirror_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_mirror_io_start,
.vdev_op_io_done = vdev_mirror_io_done,
.vdev_op_state_change = vdev_mirror_state_change,
.vdev_op_need_resilver = vdev_default_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_MIRROR, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
vdev_ops_t vdev_replacing_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_mirror_open,
.vdev_op_close = vdev_mirror_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_mirror_io_start,
.vdev_op_io_done = vdev_mirror_io_done,
.vdev_op_state_change = vdev_mirror_state_change,
.vdev_op_need_resilver = vdev_default_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_REPLACING, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
vdev_ops_t vdev_spare_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_mirror_open,
.vdev_op_close = vdev_mirror_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_mirror_io_start,
.vdev_op_io_done = vdev_mirror_io_done,
.vdev_op_state_change = vdev_mirror_state_change,
.vdev_op_need_resilver = vdev_default_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_default_xlate,
.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_SPARE, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, INT, ZMOD_RW,
"Rotating media load increment for non-seeking I/O's");
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_inc, INT, ZMOD_RW,
"Rotating media load increment for seeking I/O's");
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_offset, INT, ZMOD_RW,
"Offset in bytes from the last I/O which triggers "
"a reduced rotating media seek increment");
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_inc, INT, ZMOD_RW,
"Non-rotating media load increment for non-seeking I/O's");
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_seek_inc, INT, ZMOD_RW,
"Non-rotating media load increment for seeking I/O's");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/vdev_missing.c b/sys/contrib/openzfs/module/zfs/vdev_missing.c
index e9145fd012d7..505df23c1fb2 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_missing.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_missing.c
@@ -1,131 +1,130 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
/*
* The 'missing' vdev is a special vdev type used only during import. It
* signifies a placeholder in the root vdev for some vdev that we know is
* missing. We pass it down to the kernel to allow the rest of the
* configuration to be parsed and an attempt made to open all available devices.
* Because its GUID is always 0, we know that the guid sum will mismatch and we
* won't be able to open the pool anyway.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
-/* ARGSUSED */
static int
vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *ashift, uint64_t *pshift)
{
/*
* Really this should just fail. But then the root vdev will be in the
* faulted state with VDEV_AUX_NO_REPLICAS, when what we really want is
* VDEV_AUX_BAD_GUID_SUM. So we pretend to succeed, knowing that we
* will fail the GUID sum check before ever trying to open the pool.
*/
+ (void) vd;
*psize = 0;
*max_psize = 0;
*ashift = 0;
*pshift = 0;
return (0);
}
-/* ARGSUSED */
static void
vdev_missing_close(vdev_t *vd)
{
+ (void) vd;
}
-/* ARGSUSED */
static void
vdev_missing_io_start(zio_t *zio)
{
zio->io_error = SET_ERROR(ENOTSUP);
zio_execute(zio);
}
-/* ARGSUSED */
static void
vdev_missing_io_done(zio_t *zio)
{
+ (void) zio;
}
vdev_ops_t vdev_missing_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_missing_open,
.vdev_op_close = vdev_missing_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_missing_io_start,
.vdev_op_io_done = vdev_missing_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = NULL,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_MISSING, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
vdev_ops_t vdev_hole_ops = {
.vdev_op_init = NULL,
.vdev_op_fini = NULL,
.vdev_op_open = vdev_missing_open,
.vdev_op_close = vdev_missing_close,
.vdev_op_asize = vdev_default_asize,
.vdev_op_min_asize = vdev_default_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_missing_io_start,
.vdev_op_io_done = vdev_missing_io_done,
.vdev_op_state_change = NULL,
.vdev_op_need_resilver = NULL,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = NULL,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = NULL,
.vdev_op_nparity = NULL,
.vdev_op_ndisks = NULL,
.vdev_op_type = VDEV_TYPE_HOLE, /* name of this vdev type */
.vdev_op_leaf = B_TRUE /* leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz.c b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
index 1feebf7089b4..b14e995e3ce1 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz.c
@@ -1,2550 +1,2558 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
#include <sys/vdev_draid.h>
#ifdef ZFS_DEBUG
#include <sys/vdev.h> /* For vdev_xlate() in vdev_raidz_io_verify() */
#endif
/*
* Virtual device vector for RAID-Z.
*
* This vdev supports single, double, and triple parity. For single parity,
* we use a simple XOR of all the data columns. For double or triple parity,
* we use a special case of Reed-Solomon coding. This extends the
* technique described in "The mathematics of RAID-6" by H. Peter Anvin by
* drawing on the system described in "A Tutorial on Reed-Solomon Coding for
* Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
* former is also based. The latter is designed to provide higher performance
* for writes.
*
* Note that the Plank paper claimed to support arbitrary N+M, but was then
* amended six years later identifying a critical flaw that invalidates its
* claims. Nevertheless, the technique can be adapted to work for up to
* triple parity. For additional parity, the amendment "Note: Correction to
* the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
* is viable, but the additional complexity means that write performance will
* suffer.
*
* All of the methods above operate on a Galois field, defined over the
* integers mod 2^N. In our case we choose N=8 for GF(2^8) so that all elements
* can be expressed with a single byte. Briefly, the operations on the
* field are defined as follows:
*
* o addition (+) is represented by a bitwise XOR
* o subtraction (-) is therefore identical to addition: A + B = A - B
* o multiplication of A by 2 is defined by the following bitwise expression:
*
* (A * 2)_7 = A_6
* (A * 2)_6 = A_5
* (A * 2)_5 = A_4
* (A * 2)_4 = A_3 + A_7
* (A * 2)_3 = A_2 + A_7
* (A * 2)_2 = A_1 + A_7
* (A * 2)_1 = A_0
* (A * 2)_0 = A_7
*
* In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
* As an aside, this multiplication is derived from the error correcting
* primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
*
* Observe that any number in the field (except for 0) can be expressed as a
* power of 2 -- a generator for the field. We store a table of the powers of
* 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
* be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
* than field addition). The inverse of a field element A (A^-1) is therefore
* A ^ (255 - 1) = A^254.
*
* The up-to-three parity columns, P, Q, R over several data columns,
* D_0, ... D_n-1, can be expressed by field operations:
*
* P = D_0 + D_1 + ... + D_n-2 + D_n-1
* Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
* = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
* R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
* = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
*
* We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
* XOR operation, and 2 and 4 can be computed quickly and generate linearly-
* independent coefficients. (There are no additional coefficients that have
* this property which is why the uncorrected Plank method breaks down.)
*
* See the reconstruction code below for how P, Q and R can used individually
* or in concert to recover missing data columns.
*/
#define VDEV_RAIDZ_P 0
#define VDEV_RAIDZ_Q 1
#define VDEV_RAIDZ_R 2
#define VDEV_RAIDZ_MUL_2(x) (((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define VDEV_RAIDZ_MUL_4(x) (VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
/*
* We provide a mechanism to perform the field multiplication operation on a
* 64-bit value all at once rather than a byte at a time. This works by
* creating a mask from the top bit in each byte and using that to
* conditionally apply the XOR of 0x1d.
*/
#define VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
(mask) = (x) & 0x8080808080808080ULL; \
(mask) = ((mask) << 1) - ((mask) >> 7); \
(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}
#define VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
VDEV_RAIDZ_64MUL_2((x), mask); \
VDEV_RAIDZ_64MUL_2((x), mask); \
}
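Because the field arithmetic described above is easy to get wrong, here is a small self-contained sketch of the byte-wise operations: multiply-by-2 per VDEV_RAIDZ_MUL_2, Horner-style evaluation of Q, and recovery of a single lost data column from P. The four data bytes are arbitrary example values chosen by the editor.

#include <stdio.h>

/* GF(2^8) multiply-by-2, mirroring VDEV_RAIDZ_MUL_2 above. */
static unsigned char
gf_mul2(unsigned char a)
{
	return ((unsigned char)((a << 1) ^ ((a & 0x80) ? 0x1d : 0)));
}

int
main(void)
{
	unsigned char d[4] = { 0x11, 0x22, 0x80, 0xff };	/* one byte per data column */
	unsigned char p = 0, q = 0;

	/* P is a plain XOR; Q is evaluated Horner-style: q = q * 2 + d_i. */
	for (int i = 0; i < 4; i++) {
		p ^= d[i];
		q = (unsigned char)(gf_mul2(q) ^ d[i]);
	}
	printf("P = %02x, Q = %02x\n", p, q);

	/* Lose one data column and recover it from P and the survivors. */
	unsigned char d2 = (unsigned char)(p ^ d[0] ^ d[1] ^ d[3]);
	printf("recovered d[2] = %02x (expected %02x)\n", d2, d[2]);
	return (0);
}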
static void
vdev_raidz_row_free(raidz_row_t *rr)
{
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size != 0)
abd_free(rc->rc_abd);
if (rc->rc_orig_data != NULL)
abd_free(rc->rc_orig_data);
}
if (rr->rr_abd_empty != NULL)
abd_free(rr->rr_abd_empty);
kmem_free(rr, offsetof(raidz_row_t, rr_col[rr->rr_scols]));
}
void
vdev_raidz_map_free(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++)
vdev_raidz_row_free(rm->rm_row[i]);
kmem_free(rm, offsetof(raidz_map_t, rm_row[rm->rm_nrows]));
}
static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
vdev_raidz_map_free(rm);
}
const zio_vsd_ops_t vdev_raidz_vsd_ops = {
.vsd_free = vdev_raidz_map_free_vsd,
};
/*
* Divides the IO evenly across all child vdevs; usually, dcols is
* the number of children in the target vdev.
*
* Avoid inlining the function to keep vdev_raidz_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
uint64_t nparity)
{
raidz_row_t *rr;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = zio->io_offset >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = zio->io_size >> ashift;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* The starting byte offset on each child vdev. */
uint64_t o = (b / dcols) << ashift;
uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
raidz_map_t *rm =
kmem_zalloc(offsetof(raidz_map_t, rm_row[1]), KM_SLEEP);
rm->rm_nrows = 1;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
q = s / (dcols - nparity);
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
r = s - q * (dcols - nparity);
/* The number of "big columns" - those which contain remainder data. */
bc = (r == 0 ? 0 : r + nparity);
/*
* The total number of data and parity sectors associated with
* this I/O.
*/
tot = s + nparity * (q + (r == 0 ? 0 : 1));
/*
* acols: The columns that will be accessed.
* scols: The columns that will be accessed or skipped.
*/
if (q == 0) {
/* Our I/O request doesn't span all child vdevs. */
acols = bc;
scols = MIN(dcols, roundup(bc, nparity + 1));
} else {
acols = dcols;
scols = dcols;
}
ASSERT3U(acols, <=, scols);
rr = kmem_alloc(offsetof(raidz_row_t, rr_col[scols]), KM_SLEEP);
rm->rm_row[0] = rr;
rr->rr_cols = acols;
rr->rr_scols = scols;
rr->rr_bigcols = bc;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
rr->rr_firstdatacol = nparity;
rr->rr_abd_empty = NULL;
rr->rr_nempty = 0;
#ifdef ZFS_DEBUG
rr->rr_offset = zio->io_offset;
rr->rr_size = zio->io_size;
#endif
asize = 0;
for (c = 0; c < scols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
col = f + c;
coff = o;
if (col >= dcols) {
col -= dcols;
coff += 1ULL << ashift;
}
rc->rc_devidx = col;
rc->rc_offset = coff;
rc->rc_abd = NULL;
rc->rc_orig_data = NULL;
rc->rc_error = 0;
rc->rc_tried = 0;
rc->rc_skipped = 0;
rc->rc_force_repair = 0;
rc->rc_allow_repair = 1;
rc->rc_need_orig_restore = B_FALSE;
if (c >= acols)
rc->rc_size = 0;
else if (c < bc)
rc->rc_size = (q + 1) << ashift;
else
rc->rc_size = q << ashift;
asize += rc->rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rm->rm_nskip = roundup(tot, nparity + 1) - tot;
rm->rm_skipstart = bc;
for (c = 0; c < rr->rr_firstdatacol; c++)
rr->rr_col[c].rc_abd =
abd_alloc_linear(rr->rr_col[c].rc_size, B_FALSE);
for (uint64_t off = 0; c < acols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
rc->rc_abd = abd_get_offset_struct(&rc->rc_abdstruct,
zio->io_abd, off, rc->rc_size);
off += rc->rc_size;
}
/*
* If all data stored spans all columns, there's a danger that parity
* will always be on the same device and, since parity isn't read
* during normal operation, that device's I/O bandwidth won't be
* used effectively. We therefore switch the parity every 1MB.
*
* ... at least that was, ostensibly, the theory. As a practical
* matter unless we juggle the parity between all devices evenly, we
* won't see any benefit. Further, occasional writes that aren't a
* multiple of the LCM of the number of children and the minimum
* stripe width are sufficient to avoid pessimal behavior.
* Unfortunately, this decision created an implicit on-disk format
* requirement that we need to support for all eternity, but only
* for single-parity RAID-Z.
*
* If we intend to skip a sector in the zeroth column for padding
* we must make sure to note this swap. We will never intend to
* skip the first column since at least one data and one parity
* column must appear in each row.
*/
ASSERT(rr->rr_cols >= 2);
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
if (rr->rr_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
devidx = rr->rr_col[0].rc_devidx;
o = rr->rr_col[0].rc_offset;
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
rr->rr_col[1].rc_devidx = devidx;
rr->rr_col[1].rc_offset = o;
if (rm->rm_skipstart == 0)
rm->rm_skipstart = 1;
}
/* init RAIDZ parity ops */
rm->rm_ops = vdev_raidz_math_get_ops();
return (rm);
}
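/*
 * Worked example (sketch, not used by the driver): for a hypothetical RAIDZ1
 * vdev with dcols = 5 children, ashift = 9 and a 6-sector write (s = 6), the
 * geometry above works out to q = 1, r = 2, bc = 3 and tot = 8; columns 0..2
 * get two sectors each, columns 3..4 get one, and no skip sectors are needed
 * since roundup(8, 2) - 8 == 0.  The helper below just re-derives tot.
 */
static inline uint64_t
raidz_map_total_sectors_sketch(uint64_t s, uint64_t dcols, uint64_t nparity)
{
	uint64_t q = s / (dcols - nparity);		/* full stripes */
	uint64_t r = s - q * (dcols - nparity);		/* remainder sectors */

	/* data sectors plus one parity sector per full or partial stripe */
	return (s + nparity * (q + (r == 0 ? 0 : 1)));
}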
struct pqr_struct {
uint64_t *p;
uint64_t *q;
uint64_t *r;
};
static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && !pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++)
*pqr->p ^= *src;
return (0);
}
static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
}
return (0);
}
static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
*pqr->r ^= *src;
}
return (0);
}
static void
vdev_raidz_generate_parity_p(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
} else {
struct pqr_struct pqr = { p, NULL, NULL };
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_p_func, &pqr);
}
}
}
static void
vdev_raidz_generate_parity_pq(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, NULL };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pq_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
}
}
}
}
static void
vdev_raidz_generate_parity_pqr(raidz_row_t *rr)
{
uint64_t *p = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
uint64_t *q = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
uint64_t *r = abd_to_buf(rr->rr_col[VDEV_RAIDZ_R].rc_abd);
uint64_t pcnt = rr->rr_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_Q].rc_size);
ASSERT(rr->rr_col[VDEV_RAIDZ_P].rc_size ==
rr->rr_col[VDEV_RAIDZ_R].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
abd_t *src = rr->rr_col[c].rc_abd;
uint64_t ccnt = rr->rr_col[c].rc_size / sizeof (p[0]);
if (c == rr->rr_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rr->rr_col[c].rc_size);
(void) memcpy(q, p, rr->rr_col[c].rc_size);
(void) memcpy(r, p, rr->rr_col[c].rc_size);
for (uint64_t i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
r[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, r };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rr->rr_col[c].rc_size,
vdev_raidz_pqr_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
uint64_t mask;
for (uint64_t i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
VDEV_RAIDZ_64MUL_4(r[i], mask);
}
}
}
}
/*
* Generate RAID parity in the first virtual columns according to the number of
* parity columns available.
*/
void
vdev_raidz_generate_parity_row(raidz_map_t *rm, raidz_row_t *rr)
{
ASSERT3U(rr->rr_cols, !=, 0);
/* Generate using the new math implementation */
if (vdev_raidz_math_generate(rm, rr) != RAIDZ_ORIGINAL_IMPL)
return;
switch (rr->rr_firstdatacol) {
case 1:
vdev_raidz_generate_parity_p(rr);
break;
case 2:
vdev_raidz_generate_parity_pq(rr);
break;
case 3:
vdev_raidz_generate_parity_pqr(rr);
break;
default:
cmn_err(CE_PANIC, "invalid RAID-Z configuration");
}
}
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_generate_parity_row(rm, rr);
}
}
-/* ARGSUSED */
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
+ (void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]);
for (int i = 0; i < cnt; i++) {
dst[i] ^= src[i];
}
return (0);
}
-/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
void *private)
{
+ (void) private;
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, src++) {
VDEV_RAIDZ_64MUL_2(*dst, mask);
*dst ^= *src;
}
return (0);
}
-/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
+ (void) private;
uint64_t *dst = buf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++) {
/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
VDEV_RAIDZ_64MUL_2(*dst, mask);
}
return (0);
}
struct reconst_q_struct {
uint64_t *q;
int exp;
};
static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
struct reconst_q_struct *rq = private;
uint64_t *dst = buf;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, rq->q++) {
int j;
uint8_t *b;
*dst ^= *rq->q;
for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
*b = vdev_raidz_exp2(*b, rq->exp);
}
}
return (0);
}
struct reconst_pq_struct {
uint8_t *p;
uint8_t *q;
uint8_t *pxy;
uint8_t *qxy;
int aexp;
int bexp;
};
static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
uint8_t *yd = ybuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
*yd = *rpq->p ^ *rpq->pxy ^ *xd;
}
return (0);
}
static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
/* same operation as vdev_raidz_reconst_pq_func() on xd */
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
}
return (0);
}
static void
vdev_raidz_reconstruct_p(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
abd_t *dst, *src;
ASSERT3U(ntgts, ==, 1);
ASSERT3U(x, >=, rr->rr_firstdatacol);
ASSERT3U(x, <, rr->rr_cols);
ASSERT3U(rr->rr_col[x].rc_size, <=, rr->rr_col[VDEV_RAIDZ_P].rc_size);
src = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
dst = rr->rr_col[x].rc_abd;
abd_copy_from_buf(dst, abd_to_buf(src), rr->rr_col[x].rc_size);
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
if (c == x)
continue;
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_p_func, NULL);
}
}
static void
vdev_raidz_reconstruct_q(raidz_row_t *rr, int *tgts, int ntgts)
{
int x = tgts[0];
int c, exp;
abd_t *dst, *src;
ASSERT(ntgts == 1);
ASSERT(rr->rr_col[x].rc_size <= rr->rr_col[VDEV_RAIDZ_Q].rc_size);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
uint64_t size = (c == x) ? 0 : MIN(rr->rr_col[x].rc_size,
rr->rr_col[c].rc_size);
src = rr->rr_col[c].rc_abd;
dst = rr->rr_col[x].rc_abd;
if (c == rr->rr_firstdatacol) {
abd_copy(dst, src, size);
if (rr->rr_col[x].rc_size > size) {
abd_zero_off(dst, size,
rr->rr_col[x].rc_size - size);
}
} else {
ASSERT3U(size, <=, rr->rr_col[x].rc_size);
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_q_pre_func, NULL);
(void) abd_iterate_func(dst,
size, rr->rr_col[x].rc_size - size,
vdev_raidz_reconst_q_pre_tail_func, NULL);
}
}
src = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
dst = rr->rr_col[x].rc_abd;
exp = 255 - (rr->rr_cols - 1 - x);
struct reconst_q_struct rq = { abd_to_buf(src), exp };
(void) abd_iterate_func(dst, 0, rr->rr_col[x].rc_size,
vdev_raidz_reconst_q_post_func, &rq);
}
static void
vdev_raidz_reconstruct_pq(raidz_row_t *rr, int *tgts, int ntgts)
{
uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
abd_t *pdata, *qdata;
uint64_t xsize, ysize;
int x = tgts[0];
int y = tgts[1];
abd_t *xd, *yd;
ASSERT(ntgts == 2);
ASSERT(x < y);
ASSERT(x >= rr->rr_firstdatacol);
ASSERT(y < rr->rr_cols);
ASSERT(rr->rr_col[x].rc_size >= rr->rr_col[y].rc_size);
/*
* Move the parity data aside -- we're going to compute parity as
* though columns x and y were full of zeros -- Pxy and Qxy. We want to
* reuse the parity generation mechanism without trashing the actual
* parity so we make those columns appear to be full of zeros by
* setting their lengths to zero.
*/
pdata = rr->rr_col[VDEV_RAIDZ_P].rc_abd;
qdata = rr->rr_col[VDEV_RAIDZ_Q].rc_abd;
xsize = rr->rr_col[x].rc_size;
ysize = rr->rr_col[y].rc_size;
rr->rr_col[VDEV_RAIDZ_P].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
rr->rr_col[VDEV_RAIDZ_Q].rc_abd =
abd_alloc_linear(rr->rr_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
rr->rr_col[x].rc_size = 0;
rr->rr_col[y].rc_size = 0;
vdev_raidz_generate_parity_pq(rr);
rr->rr_col[x].rc_size = xsize;
rr->rr_col[y].rc_size = ysize;
p = abd_to_buf(pdata);
q = abd_to_buf(qdata);
pxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
qxy = abd_to_buf(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
xd = rr->rr_col[x].rc_abd;
yd = rr->rr_col[y].rc_abd;
/*
* We now have:
* Pxy = P + D_x + D_y
* Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
*
* We can then solve for D_x:
* D_x = A * (P + Pxy) + B * (Q + Qxy)
* where
* A = 2^(x - y) * (2^(x - y) + 1)^-1
* B = 2^(ndevs - 1 - x) * (2^(x - y) + 1)^-1
*
* With D_x in hand, we can easily solve for D_y:
* D_y = P + Pxy + D_x
*/
a = vdev_raidz_pow2[255 + x - y];
b = vdev_raidz_pow2[255 - (rr->rr_cols - 1 - x)];
tmp = 255 - vdev_raidz_log2[a ^ 1];
aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];
ASSERT3U(xsize, >=, ysize);
struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };
(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
vdev_raidz_reconst_pq_func, &rpq);
(void) abd_iterate_func(xd, ysize, xsize - ysize,
vdev_raidz_reconst_pq_tail_func, &rpq);
abd_free(rr->rr_col[VDEV_RAIDZ_P].rc_abd);
abd_free(rr->rr_col[VDEV_RAIDZ_Q].rc_abd);
/*
* Restore the saved parity data.
*/
rr->rr_col[VDEV_RAIDZ_P].rc_abd = pdata;
rr->rr_col[VDEV_RAIDZ_Q].rc_abd = qdata;
}
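/*
 * Single-byte sketch of the algebra in the comment above: given one byte each
 * of P, Q, Pxy and Qxy, reconstruct D_x using the precomputed exponents aexp
 * and bexp, then derive D_y from P.  This duplicates, per byte, what
 * vdev_raidz_reconst_pq_func() does and exists purely as an illustration.
 */
static inline void
raidz_reconst_pq_byte_sketch(uint8_t p, uint8_t q, uint8_t pxy, uint8_t qxy,
    int aexp, int bexp, uint8_t *dx, uint8_t *dy)
{
	/* D_x = A * (P + Pxy) + B * (Q + Qxy), with A = 2^aexp, B = 2^bexp */
	*dx = vdev_raidz_exp2(p ^ pxy, aexp) ^ vdev_raidz_exp2(q ^ qxy, bexp);
	/* D_y = P + Pxy + D_x */
	*dy = p ^ pxy ^ *dx;
}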
/* BEGIN CSTYLED */
/*
* In the general case of reconstruction, we must solve the system of linear
* equations defined by the coefficients used to generate parity as well as
* the contents of the data and parity disks. This can be expressed with
* vectors for the original data (D) and the actual data (d) and parity (p)
* and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
*
* __ __ __ __
* | | __ __ | p_0 |
* | V | | D_0 | | p_m-1 |
* | | x | : | = | d_0 |
* | I | | D_n-1 | | : |
* | | ~~ ~~ | d_n-1 |
* ~~ ~~ ~~ ~~
*
* I is simply a square identity matrix of size n, and V is a Vandermonde
* matrix defined by the coefficients we chose for the various parity columns
* (1, 2, 4). Note that these values were chosen for simplicity and speed of
* computation, as well as for linear separability.
*
* __ __ __ __
* | 1 .. 1 1 1 | | p_0 |
* | 2^n-1 .. 4 2 1 | __ __ | : |
* | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 |
* | 1 .. 0 0 0 | | D_1 | | d_0 |
* | 0 .. 0 0 0 | x | D_2 | = | d_1 |
* | : : : : | | : | | d_2 |
* | 0 .. 1 0 0 | | D_n-1 | | : |
* | 0 .. 0 1 0 | ~~ ~~ | : |
* | 0 .. 0 0 1 | | d_n-1 |
* ~~ ~~ ~~ ~~
*
* Note that I, V, d, and p are known. To compute D, we must invert the
* matrix and use the known data and parity values to reconstruct the unknown
* data values. We begin by removing the rows in V|I and d|p that correspond
* to failed or missing columns; we then make V|I square (n x n) and d|p
* sized n by removing rows corresponding to unused parity from the bottom up
* to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
* using Gauss-Jordan elimination. In the example below we use m=3 parity
* columns, n=8 data columns, with errors in d_1, d_2, and p_1:
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks
* | 19 205 116 29 64 16 4 1 | / /
* | 1 0 0 0 0 0 0 0 | / /
* | 0 1 0 0 0 0 0 0 | <--' /
* (V|I) = | 0 0 1 0 0 0 0 0 | <---'
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 |
* | 19 205 116 29 64 16 4 1 |
* | 1 0 0 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 |
* (V|I)' = | 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
* have carefully chosen the seed values 1, 2, and 4 to ensure that this
* matrix is not singular.
* __ __
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 0 0 1 0 0 0 0 0 |
* | 167 100 5 41 159 169 217 208 |
* | 166 100 4 40 158 168 216 209 |
* (V|I)'^-1 = | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
* of the missing data.
*
* As is apparent from the example above, the only non-trivial rows in the
* inverse matrix correspond to the data disks that we're trying to
* reconstruct. Indeed, those are the only rows we need as the others would
* only be useful for reconstructing data known or assumed to be valid. For
* that reason, we only build the coefficients in the rows that correspond to
* targeted columns.
*/
/* END CSTYLED */
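/*
 * Sketch of a single Gauss-Jordan step over GF(2^8), as performed by
 * vdev_raidz_matrix_invert() below: normalize a row by multiplying every
 * element by the inverse of its pivot.  The inverse of a nonzero element e
 * is 2^(255 - log_2(e)), per the field discussion at the top of this file.
 * Illustration only; the real code applies the same operation to matching
 * rows[] and invrows[] pairs.
 */
static inline void
raidz_gj_normalize_row_sketch(uint8_t *row, int n, int pivot_col)
{
	uint8_t log = 255 - vdev_raidz_log2[row[pivot_col]];

	for (int j = 0; j < n; j++)
		row[j] = vdev_raidz_exp2(row[j], log);
}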
static void
vdev_raidz_matrix_init(raidz_row_t *rr, int n, int nmap, int *map,
uint8_t **rows)
{
int i, j;
int pow;
ASSERT(n == rr->rr_cols - rr->rr_firstdatacol);
/*
* Fill in the missing rows of interest.
*/
for (i = 0; i < nmap; i++) {
ASSERT3S(0, <=, map[i]);
ASSERT3S(map[i], <=, 2);
pow = map[i] * n;
if (pow > 255)
pow -= 255;
ASSERT(pow <= 255);
for (j = 0; j < n; j++) {
pow -= map[i];
if (pow < 0)
pow += 255;
rows[i][j] = vdev_raidz_pow2[pow];
}
}
}
static void
vdev_raidz_matrix_invert(raidz_row_t *rr, int n, int nmissing, int *missing,
uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
int i, j, ii, jj;
uint8_t log;
/*
* Assert that the first nmissing entries from the array of used
* columns correspond to parity columns and that subsequent entries
* correspond to data columns.
*/
for (i = 0; i < nmissing; i++) {
ASSERT3S(used[i], <, rr->rr_firstdatacol);
}
for (; i < n; i++) {
ASSERT3S(used[i], >=, rr->rr_firstdatacol);
}
/*
* First initialize the storage where we'll compute the inverse rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
invrows[i][j] = (i == j) ? 1 : 0;
}
}
/*
* Subtract all trivial rows from the rows of consequence.
*/
for (i = 0; i < nmissing; i++) {
for (j = nmissing; j < n; j++) {
ASSERT3U(used[j], >=, rr->rr_firstdatacol);
jj = used[j] - rr->rr_firstdatacol;
ASSERT3S(jj, <, n);
invrows[i][j] = rows[i][jj];
rows[i][jj] = 0;
}
}
/*
* For each of the rows of interest, we must normalize it and subtract
* a multiple of it from the other rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < missing[i]; j++) {
ASSERT0(rows[i][j]);
}
ASSERT3U(rows[i][missing[i]], !=, 0);
/*
* Compute the inverse of the first element and multiply each
* element in the row by that value.
*/
log = 255 - vdev_raidz_log2[rows[i][missing[i]]];
for (j = 0; j < n; j++) {
rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
}
for (ii = 0; ii < nmissing; ii++) {
if (i == ii)
continue;
ASSERT3U(rows[ii][missing[i]], !=, 0);
log = vdev_raidz_log2[rows[ii][missing[i]]];
for (j = 0; j < n; j++) {
rows[ii][j] ^=
vdev_raidz_exp2(rows[i][j], log);
invrows[ii][j] ^=
vdev_raidz_exp2(invrows[i][j], log);
}
}
}
/*
* Verify that the data that is left in the rows is properly part of
* an identity matrix.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
if (j == missing[i]) {
ASSERT3U(rows[i][j], ==, 1);
} else {
ASSERT0(rows[i][j]);
}
}
}
}
static void
vdev_raidz_matrix_reconstruct(raidz_row_t *rr, int n, int nmissing,
int *missing, uint8_t **invrows, const uint8_t *used)
{
int i, j, x, cc, c;
uint8_t *src;
uint64_t ccount;
uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
uint8_t log = 0;
uint8_t val;
int ll;
uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
psize = sizeof (invlog[0][0]) * n * nmissing;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing; i++) {
invlog[i] = pp;
pp += n;
}
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
ASSERT3U(invrows[i][j], !=, 0);
invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
}
}
for (i = 0; i < n; i++) {
c = used[i];
ASSERT3U(c, <, rr->rr_cols);
ccount = rr->rr_col[c].rc_size;
ASSERT(ccount >= rr->rr_col[missing[0]].rc_size || i > 0);
if (ccount == 0)
continue;
src = abd_to_buf(rr->rr_col[c].rc_abd);
for (j = 0; j < nmissing; j++) {
cc = missing[j] + rr->rr_firstdatacol;
ASSERT3U(cc, >=, rr->rr_firstdatacol);
ASSERT3U(cc, <, rr->rr_cols);
ASSERT3U(cc, !=, c);
dcount[j] = rr->rr_col[cc].rc_size;
if (dcount[j] != 0)
dst[j] = abd_to_buf(rr->rr_col[cc].rc_abd);
}
for (x = 0; x < ccount; x++, src++) {
if (*src != 0)
log = vdev_raidz_log2[*src];
for (cc = 0; cc < nmissing; cc++) {
if (x >= dcount[cc])
continue;
if (*src == 0) {
val = 0;
} else {
if ((ll = log + invlog[cc][i]) >= 255)
ll -= 255;
val = vdev_raidz_pow2[ll];
}
if (i == 0)
dst[cc][x] = val;
else
dst[cc][x] ^= val;
}
}
}
kmem_free(p, psize);
}
static void
vdev_raidz_reconstruct_general(raidz_row_t *rr, int *tgts, int ntgts)
{
int n, i, c, t, tt;
int nmissing_rows;
int missing_rows[VDEV_RAIDZ_MAXPARITY];
int parity_map[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
uint8_t *used;
abd_t **bufs = NULL;
/*
* Matrix reconstruction can't use scatter ABDs yet, so we allocate
* temporary linear ABDs if any non-linear ABDs are found.
*/
for (i = rr->rr_firstdatacol; i < rr->rr_cols; i++) {
if (!abd_is_linear(rr->rr_col[i].rc_abd)) {
bufs = kmem_alloc(rr->rr_cols * sizeof (abd_t *),
KM_PUSHPAGE);
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
bufs[c] = col->rc_abd;
if (bufs[c] != NULL) {
col->rc_abd = abd_alloc_linear(
col->rc_size, B_TRUE);
abd_copy(col->rc_abd, bufs[c],
col->rc_size);
}
}
break;
}
}
n = rr->rr_cols - rr->rr_firstdatacol;
/*
* Figure out which data columns are missing.
*/
nmissing_rows = 0;
for (t = 0; t < ntgts; t++) {
if (tgts[t] >= rr->rr_firstdatacol) {
missing_rows[nmissing_rows++] =
tgts[t] - rr->rr_firstdatacol;
}
}
/*
* Figure out which parity columns to use to help generate the missing
* data columns.
*/
for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
ASSERT(tt < ntgts);
ASSERT(c < rr->rr_firstdatacol);
/*
* Skip any targeted parity columns.
*/
if (c == tgts[tt]) {
tt++;
continue;
}
parity_map[i] = c;
i++;
}
psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
nmissing_rows * n + sizeof (used[0]) * n;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing_rows; i++) {
rows[i] = pp;
pp += n;
invrows[i] = pp;
pp += n;
}
used = pp;
for (i = 0; i < nmissing_rows; i++) {
used[i] = parity_map[i];
}
for (tt = 0, c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
if (tt < nmissing_rows &&
c == missing_rows[tt] + rr->rr_firstdatacol) {
tt++;
continue;
}
ASSERT3S(i, <, n);
used[i] = c;
i++;
}
/*
* Initialize the interesting rows of the matrix.
*/
vdev_raidz_matrix_init(rr, n, nmissing_rows, parity_map, rows);
/*
* Invert the matrix.
*/
vdev_raidz_matrix_invert(rr, n, nmissing_rows, missing_rows, rows,
invrows, used);
/*
* Reconstruct the missing data using the generated matrix.
*/
vdev_raidz_matrix_reconstruct(rr, n, nmissing_rows, missing_rows,
invrows, used);
kmem_free(p, psize);
/*
* copy back from temporary linear abds and free them
*/
if (bufs) {
for (c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *col = &rr->rr_col[c];
if (bufs[c] != NULL) {
abd_copy(bufs[c], col->rc_abd, col->rc_size);
abd_free(col->rc_abd);
}
col->rc_abd = bufs[c];
}
kmem_free(bufs, rr->rr_cols * sizeof (abd_t *));
}
}
static void
vdev_raidz_reconstruct_row(raidz_map_t *rm, raidz_row_t *rr,
const int *t, int nt)
{
int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
int ntgts;
int i, c, ret;
int nbadparity, nbaddata;
int parity_valid[VDEV_RAIDZ_MAXPARITY];
nbadparity = rr->rr_firstdatacol;
nbaddata = rr->rr_cols - nbadparity;
ntgts = 0;
for (i = 0, c = 0; c < rr->rr_cols; c++) {
if (c < rr->rr_firstdatacol)
parity_valid[c] = B_FALSE;
if (i < nt && c == t[i]) {
tgts[ntgts++] = c;
i++;
} else if (rr->rr_col[c].rc_error != 0) {
tgts[ntgts++] = c;
} else if (c >= rr->rr_firstdatacol) {
nbaddata--;
} else {
parity_valid[c] = B_TRUE;
nbadparity--;
}
}
ASSERT(ntgts >= nt);
ASSERT(nbaddata >= 0);
ASSERT(nbaddata + nbadparity == ntgts);
dt = &tgts[nbadparity];
/* Reconstruct using the new math implementation */
ret = vdev_raidz_math_reconstruct(rm, rr, parity_valid, dt, nbaddata);
if (ret != RAIDZ_ORIGINAL_IMPL)
return;
/*
* See if we can use any of our optimized reconstruction routines.
*/
switch (nbaddata) {
case 1:
if (parity_valid[VDEV_RAIDZ_P]) {
vdev_raidz_reconstruct_p(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_q(rr, dt, 1);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
case 2:
ASSERT(rr->rr_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_P] &&
parity_valid[VDEV_RAIDZ_Q]) {
vdev_raidz_reconstruct_pq(rr, dt, 2);
return;
}
ASSERT(rr->rr_firstdatacol > 2);
break;
}
vdev_raidz_reconstruct_general(rr, tgts, ntgts);
}
static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *logical_ashift, uint64_t *physical_ashift)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t nparity = vdrz->vd_nparity;
int c;
int lasterror = 0;
int numerrors = 0;
ASSERT(nparity > 0);
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
*physical_ashift = MAX(*physical_ashift,
cvd->vdev_physical_ashift);
}
*asize *= vd->vdev_children;
*max_asize *= vd->vdev_children;
if (numerrors > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_raidz_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c] != NULL)
vdev_close(vd->vdev_child[c]);
}
}
static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t asize;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t cols = vdrz->vd_logical_width;
uint64_t nparity = vdrz->vd_nparity;
asize = ((psize - 1) >> ashift) + 1;
asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
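/*
 * Worked example (illustrative): with ashift = 9, cols = 5 and nparity = 1,
 * a 3 KiB block (psize = 3072, i.e. 6 sectors) needs ceil(6 / 4) = 2 parity
 * sectors, for 8 sectors total; 8 is already a multiple of (nparity + 1), so
 * vdev_raidz_asize() returns 8 << 9 = 4096 bytes.
 */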
/*
* The allocatable space for a raidz vdev is N * sizeof(smallest child)
* so each child must provide at least 1/Nth of its asize.
*/
static uint64_t
vdev_raidz_min_asize(vdev_t *vd)
{
return ((vd->vdev_min_asize + vd->vdev_children - 1) /
vd->vdev_children);
}
void
vdev_raidz_child_done(zio_t *zio)
{
raidz_col_t *rc = zio->io_private;
rc->rc_error = zio->io_error;
rc->rc_tried = 1;
rc->rc_skipped = 0;
}
static void
vdev_raidz_io_verify(vdev_t *vd, raidz_row_t *rr, int col)
{
#ifdef ZFS_DEBUG
vdev_t *tvd = vd->vdev_top;
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = rr->rr_offset;
logical_rs.rs_end = logical_rs.rs_start +
vdev_raidz_asize(vd, rr->rr_size);
raidz_col_t *rc = &rr->rr_col[col];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
vdev_xlate(cvd, &logical_rs, &physical_rs, &remain_rs);
ASSERT(vdev_xlate_is_empty(&remain_rs));
ASSERT3U(rc->rc_offset, ==, physical_rs.rs_start);
ASSERT3U(rc->rc_offset, <, physical_rs.rs_end);
/*
* It would be nice to assert that rs_end is equal
* to rc_offset + rc_size but there might be an
* optional I/O at the end that is not accounted in
* rc_size.
*/
if (physical_rs.rs_end > rc->rc_offset + rc->rc_size) {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset +
rc->rc_size + (1 << tvd->vdev_ashift));
} else {
ASSERT3U(physical_rs.rs_end, ==, rc->rc_offset + rc->rc_size);
}
#endif
}
static void
vdev_raidz_io_start_write(zio_t *zio, raidz_row_t *rr, uint64_t ashift)
{
vdev_t *vd = zio->io_vd;
raidz_map_t *rm = zio->io_vsd;
int c, i;
vdev_raidz_generate_parity_row(rm, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0)
continue;
/* Verify physical to logical translation */
vdev_raidz_io_verify(vd, rr, c);
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx], rc->rc_offset,
rc->rc_abd, rc->rc_size, zio->io_type, zio->io_priority,
0, vdev_raidz_child_done, rc));
}
/*
* Generate optional I/Os for skip sectors to improve aggregation
* contiguity.
*/
for (c = rm->rm_skipstart, i = 0; i < rm->rm_nskip; c++, i++) {
ASSERT(c <= rr->rr_scols);
if (c == rr->rr_scols)
c = 0;
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset + rc->rc_size, NULL, 1ULL << ashift,
zio->io_type, zio->io_priority,
ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
}
}
static void
vdev_raidz_io_start_read(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
/*
* Iterate over the columns in reverse order so that we hit the parity
* last -- any errors along the way will force us to read the parity.
*/
for (int c = rr->rr_cols - 1; c >= 0; c--) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_size == 0)
continue;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_readable(cvd)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
}
if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
if (c >= rr->rr_firstdatacol)
rr->rr_missingdata++;
else
rr->rr_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
if (c >= rr->rr_firstdatacol || rr->rr_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
}
/*
* Start an IO operation on a RAIDZ VDev
*
* Outline:
* - For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity.
* 3. If the column skips any sectors for padding, create optional dummy
* write zio children for those areas to improve aggregation continuity.
* - For read operations:
* 1. Create child zio read operations to each data column's vdev to read
* the range of data required for zio.
* 2. If this is a scrub or resilver operation, or if any of the data
* vdevs have had errors, then create zio read operations to the parity
* columns' VDevs as well.
*/
static void
vdev_raidz_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_t *tvd = vd->vdev_top;
vdev_raidz_t *vdrz = vd->vdev_tsd;
raidz_map_t *rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift,
vdrz->vd_logical_width, vdrz->vd_nparity);
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
/*
* Until raidz expansion is implemented, all maps for a raidz vdev
* contain a single row.
*/
ASSERT3U(rm->rm_nrows, ==, 1);
raidz_row_t *rr = rm->rm_row[0];
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_io_start_write(zio, rr, tvd->vdev_ashift);
} else {
ASSERT(zio->io_type == ZIO_TYPE_READ);
vdev_raidz_io_start_read(zio, rr);
}
zio_execute(zio);
}
/*
* Report a checksum error for a child of a RAID-Z device.
*/
-static void
-raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
+void
+vdev_raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE) &&
zio->io_priority != ZIO_PRIORITY_REBUILD) {
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
(void) zfs_ereport_post_checksum(zio->io_spa, vd,
&zio->io_bookmark, zio, rc->rc_offset, rc->rc_size,
rc->rc_abd, bad_data, &zbc);
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
}
}
/*
* We keep track of whether or not there were any injected errors, so that
* any ereports we generate can note it.
*/
static int
raidz_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
bzero(&zbc, sizeof (zio_bad_cksum_t));
int ret = zio_checksum_error(zio, &zbc);
if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1;
return (ret);
}
/*
* Generate the parity from the data columns. If we tried and were able to
* read the parity without error, verify that the generated parity matches the
* data we read. If it doesn't, we fire off a checksum error. Return the
* number of such failures.
*/
static int
raidz_parity_verify(zio_t *zio, raidz_row_t *rr)
{
abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int c, ret = 0;
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc;
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
if (checksum == ZIO_CHECKSUM_NOPARITY)
return (ret);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
orig[c] = abd_alloc_sametype(rc->rc_abd, rc->rc_size);
abd_copy(orig[c], rc->rc_abd, rc->rc_size);
}
+ /*
+ * Verify any empty sectors are zero filled to ensure the parity
+ * is calculated correctly even if these non-data sectors are damaged.
+ */
+ if (rr->rr_nempty && rr->rr_abd_empty != NULL)
+ ret += vdev_draid_map_verify_empty(zio, rr);
+
/*
* Regenerates parity even for !tried||rc_error!=0 columns. This
* isn't harmful but it does have the side effect of fixing stuff
* we didn't realize was necessary (i.e. even if we return 0).
*/
vdev_raidz_generate_parity_row(rm, rr);
for (c = 0; c < rr->rr_firstdatacol; c++) {
rc = &rr->rr_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
if (abd_cmp(orig[c], rc->rc_abd) != 0) {
- raidz_checksum_error(zio, rc, orig[c]);
+ vdev_raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
abd_free(orig[c]);
}
return (ret);
}
static int
vdev_raidz_worst_error(raidz_row_t *rr)
{
int error = 0;
for (int c = 0; c < rr->rr_cols; c++)
error = zio_worst_error(error, rr->rr_col[c].rc_error);
return (error);
}
static void
vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
{
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
if (!rc->rc_skipped)
unexpected_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
}
/*
* If we read more parity disks than were used for
* reconstruction, confirm that the other parity disks produced
* correct data.
*
* Note that we also regenerate parity when resilvering so we
* can write it out to failed devices later.
*/
if (parity_errors + parity_untried <
rr->rr_firstdatacol - data_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER)) {
int n = raidz_parity_verify(zio, rr);
unexpected_errors += n;
- ASSERT3U(parity_errors + n, <=, rr->rr_firstdatacol);
}
if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
(unexpected_errors > 0 || (zio->io_flags & ZIO_FLAG_RESILVER))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *vd = zio->io_vd;
vdev_t *cvd = vd->vdev_child[rc->rc_devidx];
if (!rc->rc_allow_repair) {
continue;
} else if (!rc->rc_force_repair &&
(rc->rc_error == 0 || rc->rc_size == 0)) {
continue;
}
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
ZIO_TYPE_WRITE,
zio->io_priority == ZIO_PRIORITY_REBUILD ?
ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
raidz_restore_orig_data(raidz_map_t *rm)
{
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
abd_copy(rc->rc_abd,
rc->rc_orig_data, rc->rc_size);
rc->rc_need_orig_restore = B_FALSE;
}
}
}
}
/*
* returns EINVAL if reconstruction of the block will not be possible
* returns ECKSUM if this specific reconstruction failed
* returns 0 on successful reconstruction
*/
static int
raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)
{
raidz_map_t *rm = zio->io_vsd;
/* Reconstruct each row */
for (int r = 0; r < rm->rm_nrows; r++) {
raidz_row_t *rr = rm->rm_row[r];
int my_tgts[VDEV_RAIDZ_MAXPARITY]; /* value is child id */
int t = 0;
int dead = 0;
int dead_data = 0;
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
ASSERT0(rc->rc_need_orig_restore);
if (rc->rc_error != 0) {
dead++;
if (c >= nparity)
dead_data++;
continue;
}
if (rc->rc_size == 0)
continue;
for (int lt = 0; lt < ntgts; lt++) {
if (rc->rc_devidx == ltgts[lt]) {
if (rc->rc_orig_data == NULL) {
rc->rc_orig_data =
abd_alloc_linear(
rc->rc_size, B_TRUE);
abd_copy(rc->rc_orig_data,
rc->rc_abd, rc->rc_size);
}
rc->rc_need_orig_restore = B_TRUE;
dead++;
if (c >= nparity)
dead_data++;
my_tgts[t++] = c;
break;
}
}
}
if (dead > nparity) {
/* reconstruction not possible */
raidz_restore_orig_data(rm);
return (EINVAL);
}
if (dead_data > 0)
vdev_raidz_reconstruct_row(rm, rr, my_tgts, t);
}
/* Check for success */
if (raidz_checksum_verify(zio) == 0) {
/* Reconstruction succeeded - report errors */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_need_orig_restore) {
/*
* Note: if this is a parity column,
* we don't really know if it's wrong.
* We need to let
* vdev_raidz_io_done_verified() check
* it, and if we set rc_error, it will
* think that it is a "known" error
* that doesn't need to be checked
* or corrected.
*/
if (rc->rc_error == 0 &&
c >= rr->rr_firstdatacol) {
- raidz_checksum_error(zio,
+ vdev_raidz_checksum_error(zio,
rc, rc->rc_orig_data);
rc->rc_error =
SET_ERROR(ECKSUM);
}
rc->rc_need_orig_restore = B_FALSE;
}
}
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
return (0);
}
/* Reconstruction failed - restore original data */
raidz_restore_orig_data(rm);
return (ECKSUM);
}
/*
* Iterate over all combinations of N bad vdevs and attempt a reconstruction.
* Note that the algorithm below is non-optimal because it doesn't take into
* account how reconstruction is actually performed. For example, with
* triple-parity RAID-Z the reconstruction procedure is the same if column 4
* is targeted as invalid as if columns 1 and 4 are targeted since in both
* cases we'd only use parity information in column 0.
*
* The order that we find the various possible combinations of failed
* disks is dictated by these rules:
* - Examine each "slot" (the "i" in tgts[i])
* - Try to increment this slot (tgts[i] = tgts[i] + 1)
* - if we can't increment because it runs into the next slot,
* reset our slot to the minimum, and examine the next slot
*
* For example, with a 6-wide RAIDZ3, and no known errors (so we have to choose
* 3 columns to reconstruct), we will generate the following sequence:
*
* STATE ACTION
* 0 1 2 special case: skip since these are all parity
* 0 1 3 first slot: reset to 0; middle slot: increment to 2
* 0 2 3 first slot: increment to 1
* 1 2 3 first: reset to 0; middle: reset to 1; last: increment to 4
* 0 1 4 first: reset to 0; middle: increment to 2
* 0 2 4 first: increment to 1
* 1 2 4 first: reset to 0; middle: increment to 3
* 0 3 4 first: increment to 1
* 1 3 4 first: increment to 2
* 2 3 4 first: reset to 0; middle: reset to 1; last: increment to 5
* 0 1 5 first: reset to 0; middle: increment to 2
* 0 2 5 first: increment to 1
* 1 2 5 first: reset to 0; middle: increment to 3
* 0 3 5 first: increment to 1
* 1 3 5 first: increment to 2
* 2 3 5 first: reset to 0; middle: increment to 4
* 0 4 5 first: increment to 1
* 1 4 5 first: increment to 2
* 2 4 5 first: increment to 3
* 3 4 5 done
*
* This strategy works for dRAID but is less efficient when there are a large
* number of child vdevs and therefore permutations to check. Furthermore,
* since the raidz_map_t rows likely do not overlap, reconstruction would be
* possible as long as there are no more than nparity data errors per row.
* These additional permutations are not currently checked but could be as
* a future improvement.
*/
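/*
 * Standalone sketch (not used by the driver) of the enumeration order
 * described above: advance the lowest slot that still has room before the
 * slot above it, then reset every lower slot to its minimum, exactly like
 * the ltgts[] "odometer" in vdev_raidz_combrec() below.  Returns B_FALSE
 * once every combination of num_failures children out of n has been tried.
 */
static inline boolean_t
raidz_next_combination_sketch(int *tgts, int num_failures, int n)
{
	for (int t = 0; t < num_failures; t++) {
		int limit = (t == num_failures - 1) ? n : tgts[t + 1];

		if (tgts[t] + 1 < limit) {
			tgts[t]++;
			/* Reset every lower slot to its minimum value. */
			for (int i = 0; i < t; i++)
				tgts[i] = i;
			return (B_TRUE);
		}
	}
	return (B_FALSE);
}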
static int
vdev_raidz_combrec(zio_t *zio)
{
int nparity = vdev_get_nparity(zio->io_vd);
raidz_map_t *rm = zio->io_vsd;
/* Check if there's enough data to attempt reconstruction. */
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
int total_errors = 0;
for (int c = 0; c < rr->rr_cols; c++) {
if (rr->rr_col[c].rc_error)
total_errors++;
}
if (total_errors > nparity)
return (vdev_raidz_worst_error(rr));
}
for (int num_failures = 1; num_failures <= nparity; num_failures++) {
int tstore[VDEV_RAIDZ_MAXPARITY + 2];
int *ltgts = &tstore[1]; /* value is logical child ID */
/* Determine number of logical children, n */
int n = zio->io_vd->vdev_children;
ASSERT3U(num_failures, <=, nparity);
ASSERT3U(num_failures, <=, VDEV_RAIDZ_MAXPARITY);
/* Handle corner cases in combrec logic */
ltgts[-1] = -1;
for (int i = 0; i < num_failures; i++) {
ltgts[i] = i;
}
ltgts[num_failures] = n;
for (;;) {
int err = raidz_reconstruct(zio, ltgts, num_failures,
nparity);
if (err == EINVAL) {
/*
* Reconstruction is not possible with this number
* of failures; try more failures.
*/
break;
} else if (err == 0)
return (0);
/* Compute next targets to try */
for (int t = 0; ; t++) {
ASSERT3U(t, <, num_failures);
ltgts[t]++;
if (ltgts[t] == n) {
/* try more failures */
ASSERT3U(t, ==, num_failures - 1);
break;
}
ASSERT3U(ltgts[t], <, n);
ASSERT3U(ltgts[t], <=, ltgts[t + 1]);
/*
* If that spot is available, we're done here.
* Try the next combination.
*/
if (ltgts[t] != ltgts[t + 1])
break;
/*
* Otherwise, reset this tgt to the minimum,
* and move on to the next tgt.
*/
ltgts[t] = ltgts[t - 1] + 1;
ASSERT3U(ltgts[t], ==, t);
}
/* Increase the number of failures and keep trying. */
if (ltgts[num_failures - 1] == n)
break;
}
}
return (ECKSUM);
}
void
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
for (uint64_t row = 0; row < rm->rm_nrows; row++) {
raidz_row_t *rr = rm->rm_row[row];
vdev_raidz_reconstruct_row(rm, rr, t, nt);
}
}
/*
* Complete a write IO operation on a RAIDZ VDev
*
* Outline:
* 1. Check for errors on the child IOs.
* 2. Return, setting an error code if too few child VDevs were written
* to reconstruct the data later. Note that partial writes are
* considered successful if they can be reconstructed at all.
*/
static void
vdev_raidz_io_done_write_impl(zio_t *zio, raidz_row_t *rr)
{
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
total_errors++;
}
}
/*
* Treat partial writes as a success. If we couldn't write enough
* columns to reconstruct the data, the I/O failed. Otherwise,
* good enough.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
if (total_errors > rr->rr_firstdatacol) {
zio->io_error = zio_worst_error(zio->io_error,
vdev_raidz_worst_error(rr));
}
}
static void
vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
raidz_row_t *rr)
{
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
int total_errors = 0;
ASSERT3U(rr->rr_missingparity, <=, rr->rr_firstdatacol);
ASSERT3U(rr->rr_missingdata, <=, rr->rr_cols - rr->rr_firstdatacol);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error) {
ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
if (c < rr->rr_firstdatacol)
parity_errors++;
else
data_errors++;
total_errors++;
} else if (c < rr->rr_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
}
/*
* If there were data errors and the number of errors we saw was
* correctable -- less than or equal to the number of parity disks read
* -- reconstruct based on the missing data.
*/
if (data_errors != 0 &&
total_errors <= rr->rr_firstdatacol - parity_untried) {
/*
* We either attempt to read all the parity columns or
* none of them. If we didn't try to read parity, we
* wouldn't be here in the correctable case. There must
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
ASSERT(parity_untried == 0);
ASSERT(parity_errors < rr->rr_firstdatacol);
/*
* Identify the data columns that reported an error.
*/
int n = 0;
int tgts[VDEV_RAIDZ_MAXPARITY];
for (int c = rr->rr_firstdatacol; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_error != 0) {
ASSERT(n < VDEV_RAIDZ_MAXPARITY);
tgts[n++] = c;
}
}
ASSERT(rr->rr_firstdatacol >= n);
vdev_raidz_reconstruct_row(rm, rr, tgts, n);
}
}
/*
* Return the number of reads issued.
*/
static int
vdev_raidz_read_all(zio_t *zio, raidz_row_t *rr)
{
vdev_t *vd = zio->io_vd;
int nread = 0;
rr->rr_missingdata = 0;
rr->rr_missingparity = 0;
/*
* If this row contains empty sectors which are not required
* for a normal read, then allocate an ABD for them now so they
* may be read, verified, and any needed repairs performed.
*/
if (rr->rr_nempty && rr->rr_abd_empty == NULL)
vdev_draid_map_alloc_empty(zio, rr);
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
if (rc->rc_tried || rc->rc_size == 0)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx],
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
nread++;
}
return (nread);
}
/*
* We're here because either there were too many errors to even attempt
* reconstruction (total_errors == rr_firstdatacol), or vdev_*_combrec()
* failed. In either case, there is enough bad data to prevent reconstruction.
* Start checksum ereports for all children which haven't failed.
*/
static void
vdev_raidz_io_done_unrecoverable(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
for (int c = 0; c < rr->rr_cols; c++) {
raidz_col_t *rc = &rr->rr_col[c];
vdev_t *cvd = zio->io_vd->vdev_child[rc->rc_devidx];
if (rc->rc_error != 0)
continue;
zio_bad_cksum_t zbc;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
(void) zfs_ereport_start_checksum(zio->io_spa,
cvd, &zio->io_bookmark, zio, rc->rc_offset,
rc->rc_size, &zbc);
mutex_enter(&cvd->vdev_stat_lock);
cvd->vdev_stat.vs_checksum_errors++;
mutex_exit(&cvd->vdev_stat_lock);
}
}
}
void
vdev_raidz_io_done(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
if (zio->io_type == ZIO_TYPE_WRITE) {
for (int i = 0; i < rm->rm_nrows; i++) {
vdev_raidz_io_done_write_impl(zio, rm->rm_row[i]);
}
} else {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_reconstruct_known_missing(zio,
rm, rr);
}
if (raidz_checksum_verify(zio) == 0) {
for (int i = 0; i < rm->rm_nrows; i++) {
raidz_row_t *rr = rm->rm_row[i];
vdev_raidz_io_done_verified(zio, rr);
}
zio_checksum_verified(zio);
} else {
/*
* A sequential resilver has no checksum, which makes
* combinatorial reconstruction impossible. This code
* path is unreachable since raidz_checksum_verify()
* has no checksum to verify and must succeed.
*/
ASSERT3U(zio->io_priority, !=, ZIO_PRIORITY_REBUILD);
/*
* This isn't a typical situation -- either we got a
* read error or a child silently returned bad data.
* Read every block so we can try again with as much
* data and parity as we can track down. If we've
* already been through once before, all children will
* be marked as tried so we'll proceed to combinatorial
* reconstruction.
*/
int nread = 0;
for (int i = 0; i < rm->rm_nrows; i++) {
nread += vdev_raidz_read_all(zio,
rm->rm_row[i]);
}
if (nread != 0) {
/*
* Normally our stage is VDEV_IO_DONE, but if
* we've already called redone(), it will have
* changed to VDEV_IO_START, in which case we
* don't want to call redone() again.
*/
if (zio->io_stage != ZIO_STAGE_VDEV_IO_START)
zio_vdev_io_redone(zio);
return;
}
zio->io_error = vdev_raidz_combrec(zio);
if (zio->io_error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
vdev_raidz_io_done_unrecoverable(zio);
}
}
}
}
static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
if (faulted > vdrz->vd_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered. The function
* assumes that at least one DTL is dirty, which implies that full stripe
* width blocks must be resilvered.
*/
static boolean_t
vdev_raidz_need_resilver(vdev_t *vd, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
uint64_t dcols = vd->vdev_children;
uint64_t nparity = vdrz->vd_nparity;
uint64_t ashift = vd->vdev_top->vdev_ashift;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = DVA_GET_OFFSET(dva) >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = ((psize - 1) >> ashift) + 1;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* Unreachable by sequential resilver. */
ASSERT3U(phys_birth, !=, TXG_UNKNOWN);
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
if (s + nparity >= dcols)
return (B_TRUE);
for (uint64_t c = 0; c < s + nparity; c++) {
uint64_t devidx = (f + c) % dcols;
vdev_t *cvd = vd->vdev_child[devidx];
/*
* dsl_scan_need_resilver() already checked vd with
* vdev_dtl_contains(). So here just check cvd with
* vdev_dtl_empty(), cheaper and a good approximation.
*/
if (!vdev_dtl_empty(cvd, DTL_PARTIAL))
return (B_TRUE);
}
return (B_FALSE);
}
static void
vdev_raidz_xlate(vdev_t *cvd, const range_seg64_t *logical_rs,
range_seg64_t *physical_rs, range_seg64_t *remain_rs)
{
+ (void) remain_rs;
+
vdev_t *raidvd = cvd->vdev_parent;
ASSERT(raidvd->vdev_ops == &vdev_raidz_ops);
uint64_t width = raidvd->vdev_children;
uint64_t tgt_col = cvd->vdev_id;
uint64_t ashift = raidvd->vdev_top->vdev_ashift;
/* make sure the offsets are block-aligned */
ASSERT0(logical_rs->rs_start % (1 << ashift));
ASSERT0(logical_rs->rs_end % (1 << ashift));
uint64_t b_start = logical_rs->rs_start >> ashift;
uint64_t b_end = logical_rs->rs_end >> ashift;
uint64_t start_row = 0;
if (b_start > tgt_col) /* avoid underflow */
start_row = ((b_start - tgt_col - 1) / width) + 1;
uint64_t end_row = 0;
if (b_end > tgt_col)
end_row = ((b_end - tgt_col - 1) / width) + 1;
physical_rs->rs_start = start_row << ashift;
physical_rs->rs_end = end_row << ashift;
ASSERT3U(physical_rs->rs_start, <=, logical_rs->rs_start);
ASSERT3U(physical_rs->rs_end - physical_rs->rs_start, <=,
logical_rs->rs_end - logical_rs->rs_start);
}
/*
* Initialize private RAIDZ specific fields from the nvlist.
*/
static int
vdev_raidz_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
vdev_raidz_t *vdrz;
uint64_t nparity;
uint_t children;
nvlist_t **child;
int error = nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children);
if (error != 0)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* device.
*/
if (nparity > 1 && spa_version(spa) < SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
else if (nparity > 2 && spa_version(spa) < SPA_VERSION_RAIDZ3)
return (SET_ERROR(EINVAL));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
nparity = 1;
}
vdrz = kmem_zalloc(sizeof (*vdrz), KM_SLEEP);
vdrz->vd_logical_width = children;
vdrz->vd_nparity = nparity;
*tsd = vdrz;
return (0);
}
static void
vdev_raidz_fini(vdev_t *vd)
{
kmem_free(vd->vdev_tsd, sizeof (vdev_raidz_t));
}
/*
* Add RAIDZ specific fields to the config nvlist.
*/
static void
vdev_raidz_config_generate(vdev_t *vd, nvlist_t *nv)
{
ASSERT3P(vd->vdev_ops, ==, &vdev_raidz_ops);
vdev_raidz_t *vdrz = vd->vdev_tsd;
/*
* Make sure someone hasn't managed to sneak a fancy new vdev
* into a crufty old storage pool.
*/
ASSERT(vdrz->vd_nparity == 1 ||
(vdrz->vd_nparity <= 2 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ2) ||
(vdrz->vd_nparity <= 3 &&
spa_version(vd->vdev_spa) >= SPA_VERSION_RAIDZ3));
/*
* Note that we'll add these even on storage pools where they
* aren't strictly required -- older software will just ignore
* it.
*/
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_nparity(vdev_t *vd)
{
vdev_raidz_t *vdrz = vd->vdev_tsd;
return (vdrz->vd_nparity);
}
static uint64_t
vdev_raidz_ndisks(vdev_t *vd)
{
return (vd->vdev_children);
}
vdev_ops_t vdev_raidz_ops = {
.vdev_op_init = vdev_raidz_init,
.vdev_op_fini = vdev_raidz_fini,
.vdev_op_open = vdev_raidz_open,
.vdev_op_close = vdev_raidz_close,
.vdev_op_asize = vdev_raidz_asize,
.vdev_op_min_asize = vdev_raidz_min_asize,
.vdev_op_min_alloc = NULL,
.vdev_op_io_start = vdev_raidz_io_start,
.vdev_op_io_done = vdev_raidz_io_done,
.vdev_op_state_change = vdev_raidz_state_change,
.vdev_op_need_resilver = vdev_raidz_need_resilver,
.vdev_op_hold = NULL,
.vdev_op_rele = NULL,
.vdev_op_remap = NULL,
.vdev_op_xlate = vdev_raidz_xlate,
.vdev_op_rebuild_asize = NULL,
.vdev_op_metaslab_init = NULL,
.vdev_op_config_generate = vdev_raidz_config_generate,
.vdev_op_nparity = vdev_raidz_nparity,
.vdev_op_ndisks = vdev_raidz_ndisks,
.vdev_op_type = VDEV_TYPE_RAIDZ, /* name of this vdev type */
.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};
diff --git a/sys/contrib/openzfs/module/zfs/vdev_removal.c b/sys/contrib/openzfs/module/zfs/vdev_removal.c
index 8b0c76ff0a14..b51d862f6466 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_removal.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_removal.c
@@ -1,2393 +1,2393 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/trace_zfs.h>
/*
* This file contains the necessary logic to remove vdevs from a
* storage pool. Currently, the only devices that can be removed
* are log, cache, and spare devices; and top level vdevs from a pool
* w/o raidz or mirrors. (Note that members of a mirror can be removed
* by the detach operation.)
*
* Log vdevs are removed by evacuating them and then turning the vdev
* into a hole vdev while holding spa config locks.
*
* Top level vdevs are removed and converted into an indirect vdev via
* a multi-step process:
*
* - Disable allocations from this device (spa_vdev_remove_top).
*
* - From a new thread (spa_vdev_remove_thread), copy data from
* the removing vdev to a different vdev. The copy happens in open
* context (spa_vdev_copy_impl) and issues a sync task
* (vdev_mapping_sync) so the sync thread can update the partial
* indirect mappings in core and on disk.
*
* - If a free happens during a removal, it is freed from the
* removing vdev, and if it has already been copied, from the new
* location as well (free_from_removing_vdev).
*
* - After the removal is completed, the copy thread converts the vdev
* into an indirect vdev (vdev_remove_complete) before instructing
* the sync thread to destroy the space maps and finish the removal
* (spa_finish_removal).
*/
typedef struct vdev_copy_arg {
metaslab_t *vca_msp;
uint64_t vca_outstanding_bytes;
uint64_t vca_read_error_bytes;
uint64_t vca_write_error_bytes;
kcondvar_t vca_cv;
kmutex_t vca_lock;
} vdev_copy_arg_t;
/*
* The maximum amount of memory we can use for outstanding i/o while
* doing a device removal. This determines how much i/o we can have
* in flight concurrently.
*/
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
/*
* The largest contiguous segment that we will attempt to allocate when
* removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
* there is a performance problem with attempting to allocate large blocks,
* consider decreasing this.
*
* See also the accessor function spa_remove_max_segment().
*/
int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
/*
* Ignore hard IO errors during device removal. When set, if a device
* encounters a hard IO error during the removal process, the removal will
* not be cancelled. This can result in a normally recoverable block
* becoming permanently damaged and is not recommended.
*/
int zfs_removal_ignore_errors = 0;
/*
* Allow a remap segment to span free chunks of at most this size. The main
* impact of a larger span is that we will read and write larger, more
* contiguous chunks, with more "unnecessary" data -- trading off bandwidth
* for iops. The value here was chosen to align with
* zfs_vdev_read_gap_limit, which is a similar concept when doing regular
* reads (but there's no reason it has to be the same).
*
* Additionally, a higher span will have the following relatively minor
* effects:
* - the mapping will be smaller, since one entry can cover more allocated
* segments
* - more of the fragmentation in the removing device will be preserved
* - we'll do larger allocations, which may fail and fall back on smaller
* allocations
*/
int vdev_removal_max_span = 32 * 1024;
/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a removal.
*/
int zfs_removal_suspend_progress = 0;
#define VDEV_REMOVAL_ZAP_OBJS "lzap"
static void spa_vdev_remove_thread(void *arg);
static int spa_vdev_remove_cancel_impl(spa_t *spa);
static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_REMOVING, sizeof (uint64_t),
sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
&spa->spa_removing_phys, tx));
}
static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
for (int i = 0; i < count; i++) {
uint64_t guid =
fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
if (guid == target_guid)
return (nvpp[i]);
}
return (NULL);
}
static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
nvlist_t *dev_to_remove)
{
nvlist_t **newdev = NULL;
if (count > 1)
newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
for (int i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
}
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
for (int i = 0; i < count - 1; i++)
nvlist_free(newdev[i]);
if (count > 1)
kmem_free(newdev, (count - 1) * sizeof (void *));
}
static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
svr->svr_allocd_segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
svr->svr_vdev_id = vd->vdev_id;
for (int i = 0; i < TXG_SIZE; i++) {
svr->svr_frees[i] = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
list_create(&svr->svr_new_segments[i],
sizeof (vdev_indirect_mapping_entry_t),
offsetof(vdev_indirect_mapping_entry_t, vime_node));
}
return (svr);
}
void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]);
ASSERT0(svr->svr_max_offset_to_sync[i]);
range_tree_destroy(svr->svr_frees[i]);
list_destroy(&svr->svr_new_segments[i]);
}
range_tree_destroy(svr->svr_allocd_segs);
mutex_destroy(&svr->svr_lock);
cv_destroy(&svr->svr_cv);
kmem_free(svr, sizeof (*svr));
}
/*
* This is called as a synctask in the txg in which we will mark this vdev
* as removing (in the config stored in the MOS).
*
* It begins the evacuation of a toplevel vdev by:
* - initializing the spa_removing_phys which tracks this removal
* - computing the amount of space to remove for accounting purposes
* - dirtying all dbufs in the spa_config_object
* - creating the spa_vdev_removal
* - starting the spa_vdev_remove_thread
*/
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
int vdev_id = (uintptr_t)arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
spa_vdev_removal_t *svr = NULL;
uint64_t txg __maybe_unused = dmu_tx_get_txg(tx);
ASSERT0(vdev_get_nparity(vd));
svr = spa_vdev_removal_create(vd);
ASSERT(vd->vdev_removing);
ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
/*
* By activating the OBSOLETE_COUNTS feature, we prevent
* the pool from being downgraded and ensure that the
* refcounts are precise.
*/
spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
uint64_t one = 1;
VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
&one, tx));
boolean_t are_precise __maybe_unused;
ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
ASSERT3B(are_precise, ==, B_TRUE);
}
vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
vd->vdev_indirect_mapping =
vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
vd->vdev_indirect_births =
vdev_indirect_births_open(mos, vic->vic_births_object);
spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
spa->spa_removing_phys.sr_start_time = gethrestime_sec();
spa->spa_removing_phys.sr_end_time = 0;
spa->spa_removing_phys.sr_state = DSS_SCANNING;
spa->spa_removing_phys.sr_to_copy = 0;
spa->spa_removing_phys.sr_copied = 0;
/*
* Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
* there may be space in the defer tree, which is free, but still
* counted in vs_alloc.
*/
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *ms = vd->vdev_ms[i];
if (ms->ms_sm == NULL)
continue;
spa->spa_removing_phys.sr_to_copy +=
metaslab_allocated_space(ms);
/*
* Space which we are freeing this txg does not need to
* be copied.
*/
spa->spa_removing_phys.sr_to_copy -=
range_tree_space(ms->ms_freeing);
ASSERT0(range_tree_space(ms->ms_freed));
for (int t = 0; t < TXG_SIZE; t++)
ASSERT0(range_tree_space(ms->ms_allocating[t]));
}
/*
* Sync tasks are called before metaslab_sync(), so there should
* be no already-synced metaslabs in the TXG_CLEAN list.
*/
ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
spa_sync_removing_state(spa, tx);
/*
* All blocks that we need to read the most recent mapping must be
* stored on concrete vdevs. Therefore, we must dirty anything that
* is read before spa_remove_init(). Specifically, the
* spa_config_object. (Note that although we already modified the
* spa_config_object in spa_sync_removing_state, that may not have
* modified all blocks of the object.)
*/
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
dmu_buf_t *dbuf;
VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
offset, FTAG, &dbuf, 0));
dmu_buf_will_dirty(dbuf, tx);
offset += dbuf->db_size;
dmu_buf_rele(dbuf, FTAG);
}
/*
* Now that we've allocated the im_object, dirty the vdev to ensure
* that the object gets written to the config on disk.
*/
vdev_config_dirty(vd);
zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
"im_obj=%llu", (u_longlong_t)vd->vdev_id, vd,
(u_longlong_t)dmu_tx_get_txg(tx),
(u_longlong_t)vic->vic_mapping_object);
spa_history_log_internal(spa, "vdev remove started", tx,
"%s vdev %llu %s", spa_name(spa), (u_longlong_t)vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
/*
* Setting spa_vdev_removal causes subsequent frees to call
* free_from_removing_vdev(). Note that we don't need any locking
* because we are the sync thread, and metaslab_free_impl() is only
* called from syncing context (potentially from a zio taskq thread,
* but in any case only when there are outstanding free i/os, which
* there are not).
*/
ASSERT3P(spa->spa_vdev_removal, ==, NULL);
spa->spa_vdev_removal = svr;
svr->svr_thread = thread_create(NULL, 0,
spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}
/*
* When we are opening a pool, we must read the mapping for each
* indirect vdev in order from most recently removed to least
* recently removed. We do this because the blocks for the mapping
* of older indirect vdevs may be stored on more recently removed vdevs.
* In order to read each indirect mapping object, we must have
* initialized all more recently removed vdevs.
*/
int
spa_remove_init(spa_t *spa)
{
int error;
error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_REMOVING, sizeof (uint64_t),
sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
&spa->spa_removing_phys);
if (error == ENOENT) {
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
return (0);
} else if (error != 0) {
return (error);
}
if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
/*
* We are currently removing a vdev. Create and
* initialize a spa_vdev_removal_t from the bonus
* buffer of the removing vdev's vdev_im_object, and
* initialize its partial mapping.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_t *vd = vdev_lookup_top(spa,
spa->spa_removing_phys.sr_removing_vdev);
if (vd == NULL) {
spa_config_exit(spa, SCL_STATE, FTAG);
return (EINVAL);
}
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
ASSERT(vdev_is_concrete(vd));
spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
ASSERT(vd->vdev_removing);
vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
spa->spa_meta_objset, vic->vic_mapping_object);
vd->vdev_indirect_births = vdev_indirect_births_open(
spa->spa_meta_objset, vic->vic_births_object);
spa_config_exit(spa, SCL_STATE, FTAG);
spa->spa_vdev_removal = svr;
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
uint64_t indirect_vdev_id =
spa->spa_removing_phys.sr_prev_indirect_vdev;
while (indirect_vdev_id != UINT64_MAX) {
vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
spa->spa_meta_objset, vic->vic_mapping_object);
vd->vdev_indirect_births = vdev_indirect_births_open(
spa->spa_meta_objset, vic->vic_births_object);
indirect_vdev_id = vic->vic_prev_indirect_vdev;
}
spa_config_exit(spa, SCL_STATE, FTAG);
/*
* Now that we've loaded all the indirect mappings, we can allow
* reads from other blocks (e.g. via predictive prefetch).
*/
spa->spa_indirect_vdevs_loaded = B_TRUE;
return (0);
}
void
spa_restart_removal(spa_t *spa)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
if (svr == NULL)
return;
/*
* In general when this function is called there is no
* removal thread running. The only scenario where this
* is not true is during spa_import() where this function
* is called twice [once from spa_import_impl() and once
* from spa_async_resume()]. Thus, in the scenario where we
* import a pool that has an ongoing removal we don't
* want to spawn a second thread.
*/
if (svr->svr_thread != NULL)
return;
if (!spa_writeable(spa))
return;
zfs_dbgmsg("restarting removal of %llu",
(u_longlong_t)svr->svr_vdev_id);
svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
0, &p0, TS_RUN, minclsyspri);
}
/*
* Process freeing from a device which is in the middle of being removed.
* We must handle this carefully so that we attempt to copy freed data,
* and we correctly free already-copied data.
*/
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t txg = spa_syncing_txg(spa);
uint64_t max_offset_yet = 0;
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
vdev_indirect_mapping_object(vim));
ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);
mutex_enter(&svr->svr_lock);
/*
* Remove the segment from the removing vdev's spacemap. This
* ensures that we will not attempt to copy this space (if the
* removal thread has not yet visited it), and also ensures
* that we know what is actually allocated on the new vdevs
* (needed if we cancel the removal).
*
* Note: we must do the metaslab_free_concrete() with the svr_lock
* held, so that the remove_thread can not load this metaslab and then
* visit this offset between the time that we metaslab_free_concrete()
* and when we check to see if it has been visited.
*
* Note: The checkpoint flag is set to false as having/taking
* a checkpoint and removing a device can't happen at the same
* time.
*/
ASSERT(!spa_has_checkpoint(spa));
metaslab_free_concrete(vd, offset, size, B_FALSE);
uint64_t synced_size = 0;
uint64_t synced_offset = 0;
uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
if (offset < max_offset_synced) {
/*
* The mapping for this offset is already on disk.
* Free from the new location.
*
* Note that we use svr_max_synced_offset because it is
* updated atomically with respect to the in-core mapping.
* By contrast, vim_max_offset is not.
*
* This block may be split between a synced entry and an
* in-flight or unvisited entry. Only process the synced
* portion of it here.
*/
synced_size = MIN(size, max_offset_synced - offset);
synced_offset = offset;
ASSERT3U(max_offset_yet, <=, max_offset_synced);
max_offset_yet = max_offset_synced;
DTRACE_PROBE3(remove__free__synced,
spa_t *, spa,
uint64_t, offset,
uint64_t, synced_size);
size -= synced_size;
offset += synced_size;
}
/*
* Look at all in-flight txgs starting from the currently syncing one
* and see if a section of this free is being copied. By starting from
* this txg and iterating forward, we might find that this region
* was copied in two different txgs and handle it appropriately.
*/
for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
int txgoff = (txg + i) & TXG_MASK;
if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
/*
* The mapping for this offset is in flight, and
* will be synced in txg+i.
*/
uint64_t inflight_size = MIN(size,
svr->svr_max_offset_to_sync[txgoff] - offset);
DTRACE_PROBE4(remove__free__inflight,
spa_t *, spa,
uint64_t, offset,
uint64_t, inflight_size,
uint64_t, txg + i);
/*
* We copy data in order of increasing offset.
* Therefore the max_offset_to_sync[] must increase
* (or be zero, indicating that nothing is being
* copied in that txg).
*/
if (svr->svr_max_offset_to_sync[txgoff] != 0) {
ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
>=, max_offset_yet);
max_offset_yet =
svr->svr_max_offset_to_sync[txgoff];
}
/*
* We've already committed to copying this segment:
* we have allocated space elsewhere in the pool for
* it and have an IO outstanding to copy the data. We
* cannot free the space before the copy has
* completed, or else the copy IO might overwrite any
* new data. To free that space, we record the
* segment in the appropriate svr_frees tree and free
* the mapped space later, in the txg where we have
* completed the copy and synced the mapping (see
* vdev_mapping_sync).
*/
range_tree_add(svr->svr_frees[txgoff],
offset, inflight_size);
size -= inflight_size;
offset += inflight_size;
/*
* This space is already accounted for as being
* done, because it is being copied in txg+i.
* However, if i!=0, then it is being copied in
* a future txg. If we crash after this txg
* syncs but before txg+i syncs, then the space
* will be free. Therefore we must account
* for the space being done in *this* txg
* (when it is freed) rather than the future txg
* (when it will be copied).
*/
ASSERT3U(svr->svr_bytes_done[txgoff], >=,
inflight_size);
svr->svr_bytes_done[txgoff] -= inflight_size;
svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
}
}
ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
if (size > 0) {
/*
* The copy thread has not yet visited this offset. Ensure
* that it doesn't.
*/
DTRACE_PROBE3(remove__free__unvisited,
spa_t *, spa,
uint64_t, offset,
uint64_t, size);
if (svr->svr_allocd_segs != NULL)
range_tree_clear(svr->svr_allocd_segs, offset, size);
/*
* Since we now do not need to copy this data, for
* accounting purposes we have done our job and can count
* it as completed.
*/
svr->svr_bytes_done[txg & TXG_MASK] += size;
}
mutex_exit(&svr->svr_lock);
/*
* Now that we have dropped svr_lock, process the synced portion
* of this free.
*/
if (synced_size > 0) {
vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
/*
* Note: this can only be called from syncing context,
* and the vdev_indirect_mapping is only changed from the
* sync thread, so we don't need svr_lock while doing
* metaslab_free_impl_cb.
*/
boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
metaslab_free_impl_cb, &checkpoint);
}
}
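/*
 * A minimal userspace sketch of how the free above is carved into synced,
 * in-flight, and unvisited portions.  The function and parameter names are
 * hypothetical; max_synced and max_inflight stand in for the synced mapping
 * offset and the per-txg svr_max_offset_to_sync values, and only the
 * MIN()-style splitting is modeled.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_MIN(a, b)	((a) < (b) ? (a) : (b))

static void
split_free(uint64_t offset, uint64_t size,
    uint64_t max_synced, uint64_t max_inflight)
{
	uint64_t synced = 0, inflight = 0;

	if (offset < max_synced) {		/* mapping already on disk */
		synced = EX_MIN(size, max_synced - offset);
		offset += synced;
		size -= synced;
	}
	if (size > 0 && offset < max_inflight) {	/* mapping in flight */
		inflight = EX_MIN(size, max_inflight - offset);
		offset += inflight;
		size -= inflight;
	}
	/* anything left has not been visited by the copy thread yet */
	printf("synced=%llu inflight=%llu unvisited=%llu\n",
	    (unsigned long long)synced, (unsigned long long)inflight,
	    (unsigned long long)size);
}

int
main(void)
{
	/* free [100, 400) when [0, 200) is synced and [200, 300) in flight */
	split_free(100, 300, 200, 300);
	return (0);
}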
/*
* Stop an active removal and update the spa_removing phys.
*/
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
/* Ensure the removal thread has completed before we free the svr. */
spa_vdev_remove_suspend(spa);
ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
if (state == DSS_FINISHED) {
spa_removing_phys_t *srp = &spa->spa_removing_phys;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (srp->sr_prev_indirect_vdev != -1) {
vdev_t *pvd;
pvd = vdev_lookup_top(spa,
srp->sr_prev_indirect_vdev);
ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
}
vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
srp->sr_prev_indirect_vdev = vd->vdev_id;
}
spa->spa_removing_phys.sr_state = state;
spa->spa_removing_phys.sr_end_time = gethrestime_sec();
spa->spa_vdev_removal = NULL;
spa_vdev_removal_destroy(svr);
spa_sync_removing_state(spa, tx);
spa_notify_waiters(spa);
vdev_config_dirty(spa->spa_root_vdev);
}
static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_mark_obsolete(vd, offset, size);
boolean_t checkpoint = B_FALSE;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
metaslab_free_impl_cb, &checkpoint);
}
/*
* On behalf of the removal thread, syncs an incremental bit more of
* the indirect mapping to disk and updates the in-memory mapping.
* Called as a sync task in every txg that the removal thread makes progress.
*/
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
uint64_t txg = dmu_tx_get_txg(tx);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT(vic->vic_mapping_object != 0);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
vdev_indirect_mapping_add_entries(vim,
&svr->svr_new_segments[txg & TXG_MASK], tx);
vdev_indirect_births_add_entry(vd->vdev_indirect_births,
vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
/*
* Free the copied data for anything that was freed while the
* mapping entries were in flight.
*/
mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
free_mapped_segment_cb, vd);
ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
vdev_indirect_mapping_max_offset(vim));
svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
mutex_exit(&svr->svr_lock);
spa_sync_removing_state(spa, tx);
}
typedef struct vdev_copy_segment_arg {
spa_t *vcsa_spa;
dva_t *vcsa_dest_dva;
uint64_t vcsa_txg;
range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;
static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
vdev_copy_segment_arg_t *vcsa = arg;
spa_t *spa = vcsa->vcsa_spa;
blkptr_t bp = { { { {0} } } };
BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(&bp, size);
BP_SET_PSIZE(&bp, size);
BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(&bp, DMU_OT_NONE);
BP_SET_LEVEL(&bp, 0);
BP_SET_DEDUP(&bp, 0);
BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
DVA_SET_OFFSET(&bp.blk_dva[0],
DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
DVA_SET_ASIZE(&bp.blk_dva[0], size);
zio_free(spa, vcsa->vcsa_txg, &bp);
}
/*
* All reads and writes associated with a call to spa_vdev_copy_segment()
* are done.
*/
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
vdev_copy_segment_arg_t *vcsa = zio->io_private;
range_tree_vacate(vcsa->vcsa_obsolete_segs,
unalloc_seg, vcsa);
range_tree_destroy(vcsa->vcsa_obsolete_segs);
kmem_free(vcsa, sizeof (*vcsa));
spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}
/*
* The write of the new location is done.
*/
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
vdev_copy_arg_t *vca = zio->io_private;
abd_free(zio->io_abd);
mutex_enter(&vca->vca_lock);
vca->vca_outstanding_bytes -= zio->io_size;
if (zio->io_error != 0)
vca->vca_write_error_bytes += zio->io_size;
cv_signal(&vca->vca_cv);
mutex_exit(&vca->vca_lock);
}
/*
* The read of the old location is done. The parent zio is the write to
* the new location. Allow it to start.
*/
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
vdev_copy_arg_t *vca = zio->io_private;
if (zio->io_error != 0) {
mutex_enter(&vca->vca_lock);
vca->vca_read_error_bytes += zio->io_size;
mutex_exit(&vca->vca_lock);
}
zio_nowait(zio_unique_parent(zio));
}
/*
* If the old and new vdevs are mirrors, we will read both sides of the old
* mirror, and write each copy to the corresponding side of the new mirror.
* If the old and new vdevs have a different number of children, we will do
* this as best as possible. Since we aren't verifying checksums, this
* ensures that as long as there's a good copy of the data, we'll have a
* good copy after the removal, even if there's silent damage to one side
* of the mirror. If we're removing a mirror that has some silent damage,
* we'll have exactly the same damage in the new location (assuming that
* the new location is also a mirror).
*
* We accomplish this by creating a tree of zio_t's, with as many writes as
* there are "children" of the new vdev (a non-redundant vdev counts as one
* child, a 2-way mirror has 2 children, etc). Each write has an associated
* read from a child of the old vdev. Typically there will be the same
* number of children of the old and new vdevs. However, if there are more
* children of the new vdev, some child(ren) of the old vdev will be issued
* multiple reads. If there are more children of the old vdev, some copies
* will be dropped.
*
* For example, the tree of zio_t's for a 2-way mirror is:
*
*                              null
*                             /    \
*   write(new vdev, child 0)        write(new vdev, child 1)
*     |                                |
*   read(old vdev, child 0)         read(old vdev, child 1)
*
* Child zio's complete before their parents complete. However, zio's
* created with zio_vdev_child_io() may be issued before their children
* complete. In this case we need to make sure that the children (reads)
* complete before the parents (writes) are *issued*. We do this by not
* calling zio_nowait() on each write until its corresponding read has
* completed.
*
* The spa_config_lock must be held while zio's created by
* zio_vdev_child_io() are in progress, to ensure that the vdev tree does
* not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
* zio is needed to release the spa_config_lock after all the reads and
* writes complete. (Note that we can't grab the config lock for each read,
* because it is not reentrant - we could deadlock with a thread waiting
* for a write lock.)
*/
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
vdev_t *source_vd, uint64_t source_offset,
vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);
/*
* If the destination child is unwritable then there is no point
* in issuing the source reads which cannot be written.
*/
if (!vdev_writeable(dest_child_vd))
return;
mutex_enter(&vca->vca_lock);
vca->vca_outstanding_bytes += size;
mutex_exit(&vca->vca_lock);
abd_t *abd = abd_alloc_for_io(size, B_FALSE);
vdev_t *source_child_vd = NULL;
if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
/*
* Source and dest are both mirrors. Copy from the same
* child id as we are copying to (wrapping around if there
* are more dest children than source children). If the
* preferred source child is unreadable select another.
*/
for (int i = 0; i < source_vd->vdev_children; i++) {
source_child_vd = source_vd->vdev_child[
(dest_id + i) % source_vd->vdev_children];
if (vdev_readable(source_child_vd))
break;
}
} else {
source_child_vd = source_vd;
}
/*
* There should always be at least one readable source child or
* the pool would be in a suspended state. If an unreadable child were
* somehow selected, the result would be IO errors, the removal process
* being cancelled, and the pool reverting to its pre-removal state.
*/
ASSERT3P(source_child_vd, !=, NULL);
zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
dest_child_vd, dest_offset, abd, size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL,
spa_vdev_copy_segment_write_done, vca);
zio_nowait(zio_vdev_child_io(write_zio, NULL,
source_child_vd, source_offset, abd, size,
ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
ZIO_FLAG_CANFAIL,
spa_vdev_copy_segment_read_done, vca));
}
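/*
 * A small standalone sketch (hypothetical names) of the source-child
 * selection above: start at the same child id as the destination and wrap
 * around, skipping unreadable children.
 */
#include <stdio.h>

static int
pick_source_child(int dest_id, int src_children, const int *readable)
{
	for (int i = 0; i < src_children; i++) {
		int c = (dest_id + i) % src_children;
		if (readable[c])
			return (c);
	}
	return (-1);	/* no readable child; the pool would be suspended */
}

int
main(void)
{
	int readable[3] = { 1, 0, 1 };	/* child 1 is unreadable */
	/* writing to dest child 1 reads from source child 2 instead */
	printf("%d\n", pick_source_child(1, 3, readable));
	return (0);
}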
/*
* Allocate a new location for this segment, and create the zio_t's to
* read from the old location and write to the new location.
*/
static int
spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
uint64_t maxalloc, uint64_t txg,
vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_entry_t *entry;
dva_t dst = {{ 0 }};
uint64_t start = range_tree_min(segs);
ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift));
ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift));
uint64_t size = range_tree_span(segs);
if (range_tree_span(segs) > maxalloc) {
/*
* We can't allocate all the segments. Prefer to end
* the allocation at the end of a segment, thus avoiding
* additional split blocks.
*/
range_seg_max_t search;
zfs_btree_index_t where;
rs_set_start(&search, segs, start + maxalloc);
rs_set_end(&search, segs, start + maxalloc);
(void) zfs_btree_find(&segs->rt_root, &search, &where);
range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
&where);
if (rs != NULL) {
size = rs_get_end(rs, segs) - start;
} else {
/*
* There are no segments that end before maxalloc.
* I.e. the first segment is larger than maxalloc,
* so we must split it.
*/
size = maxalloc;
}
}
ASSERT3U(size, <=, maxalloc);
ASSERT0(P2PHASE(size, 1 << spa->spa_min_ashift));
/*
* An allocation class might not have any remaining vdevs or space.
*/
metaslab_class_t *mc = mg->mg_class;
if (mc->mc_groups == 0)
mc = spa_normal_class(spa);
int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
zal, 0);
if (error == ENOSPC && mc != spa_normal_class(spa)) {
error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
&dst, 0, NULL, txg, 0, zal, 0);
}
if (error != 0)
return (error);
/*
* Determine the ranges that are not actually needed. Offsets are
* relative to the start of the range to be copied (i.e. relative to the
* local variable "start").
*/
range_tree_t *obsolete_segs = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
zfs_btree_index_t where;
range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
ASSERT3U(rs_get_start(rs, segs), ==, start);
uint64_t prev_seg_end = rs_get_end(rs, segs);
while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) {
if (rs_get_start(rs, segs) >= start + size) {
break;
} else {
range_tree_add(obsolete_segs,
prev_seg_end - start,
rs_get_start(rs, segs) - prev_seg_end);
}
prev_seg_end = rs_get_end(rs, segs);
}
/* We don't end in the middle of an obsolete range */
ASSERT3U(start + size, <=, prev_seg_end);
range_tree_clear(segs, start, size);
/*
* We can't have any padding of the allocated size, otherwise we will
* misunderstand what's allocated, and the size of the mapping. We
* prevent padding by ensuring that all devices in the pool have the
* same ashift, and the allocation size is a multiple of the ashift.
*/
VERIFY3U(DVA_GET_ASIZE(&dst), ==, size);
entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
entry->vime_mapping.vimep_dst = dst;
if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
entry->vime_obsolete_count = range_tree_space(obsolete_segs);
}
vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
vcsa->vcsa_obsolete_segs = obsolete_segs;
vcsa->vcsa_spa = spa;
vcsa->vcsa_txg = txg;
/*
* See comment before spa_vdev_copy_one_child().
*/
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
spa_vdev_copy_segment_done, vcsa, 0);
vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
if (dest_vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < dest_vd->vdev_children; i++) {
vdev_t *child = dest_vd->vdev_child[i];
spa_vdev_copy_one_child(vca, nzio, vd, start,
child, DVA_GET_OFFSET(&dst), i, size);
}
} else {
spa_vdev_copy_one_child(vca, nzio, vd, start,
dest_vd, DVA_GET_OFFSET(&dst), -1, size);
}
zio_nowait(nzio);
list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
vdev_dirty(vd, 0, NULL, txg);
return (0);
}
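/*
 * A standalone illustration of the "prefer to end on a segment boundary"
 * logic above, with a sorted array standing in for the range tree btree.
 * All names here are hypothetical and userspace-only.
 */
#include <stdint.h>
#include <stdio.h>

/* seg_end[] holds the sorted end offsets of the segments being copied. */
static uint64_t
pick_copy_size(uint64_t start, uint64_t maxalloc,
    const uint64_t *seg_end, int nsegs)
{
	uint64_t size = maxalloc;	/* fallback: split the segment */

	for (int i = nsegs - 1; i >= 0; i--) {
		if (seg_end[i] <= start + maxalloc) {
			size = seg_end[i] - start;	/* stop on a boundary */
			break;
		}
	}
	return (size);
}

int
main(void)
{
	uint64_t seg_end[] = { 4096, 12288, 40960 };

	/* with maxalloc 16384 we stop at 12288 rather than split a segment */
	printf("%llu\n", (unsigned long long)
	    pick_copy_size(0, 16384, seg_end, 3));
	return (0);
}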
/*
* Complete the removal of a toplevel vdev. This is called as a
* synctask in the same txg that we will sync out the new config (to the
* MOS object) which indicates that this vdev is indirect.
*/
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(svr->svr_bytes_done[i]);
}
ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
spa->spa_removing_phys.sr_to_copy);
vdev_destroy_spacemaps(vd, tx);
/* destroy leaf zaps, if any */
ASSERT3P(svr->svr_zaplist, !=, NULL);
for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
}
fnvlist_free(svr->svr_zaplist);
spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
/* vd->vdev_path is not available here */
spa_history_log_internal(spa, "vdev remove completed", tx,
"%s vdev %llu", spa_name(spa), (u_longlong_t)vd->vdev_id);
}
static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
ASSERT3P(zlist, !=, NULL);
ASSERT0(vdev_get_nparity(vd));
if (vd->vdev_leaf_zap != 0) {
char zkey[32];
(void) snprintf(zkey, sizeof (zkey), "%s-%llu",
VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
}
for (uint64_t id = 0; id < vd->vdev_children; id++) {
vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
}
}
static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
vdev_t *ivd;
dmu_tx_t *tx;
spa_t *spa = vd->vdev_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
/*
* First, build a list of leaf zaps to be destroyed.
* This is passed to the sync context thread,
* which does the actual unlinking.
*/
svr->svr_zaplist = fnvlist_alloc();
vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
ivd = vdev_add_parent(vd, &vdev_indirect_ops);
ivd->vdev_removing = 0;
vd->vdev_leaf_zap = 0;
vdev_remove_child(ivd, vd);
vdev_compact_children(ivd);
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
mutex_enter(&svr->svr_lock);
svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock);
/* After this, we can not use svr. */
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool,
vdev_remove_complete_sync, svr, tx);
dmu_tx_commit(tx);
}
/*
* Complete the removal of a toplevel vdev. This is called in open
* context by the removal thread after we have copied all vdev's data.
*/
static void
vdev_remove_complete(spa_t *spa)
{
uint64_t txg;
/*
* Wait for any deferred frees to be synced before we call
* vdev_metaslab_fini()
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
txg = spa_vdev_enter(spa);
vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)txg);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
vd->vdev_mg = NULL;
spa_log_sm_set_blocklimit(spa);
}
if (vd->vdev_log_mg != NULL) {
ASSERT0(vd->vdev_ms_count);
metaslab_group_destroy(vd->vdev_log_mg);
vd->vdev_log_mg = NULL;
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
vdev_remove_replace_with_indirect(vd, txg);
/*
* We now release the locks, allowing spa_sync to run and finish the
* removal via vdev_remove_complete_sync in syncing context.
*
* Note that we hold on to the vdev_t that has been replaced. Since
* it isn't part of the vdev tree any longer, it can't be concurrently
* manipulated, even while we don't have the config lock.
*/
(void) spa_vdev_exit(spa, NULL, txg, 0);
/*
* Top ZAP should have been transferred to the indirect vdev in
* vdev_remove_replace_with_indirect.
*/
ASSERT0(vd->vdev_top_zap);
/*
* Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
*/
ASSERT0(vd->vdev_leaf_zap);
txg = spa_vdev_enter(spa);
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Request to update the config and the config cachefile.
*/
vdev_config_dirty(spa->spa_root_vdev);
(void) spa_vdev_exit(spa, vd, txg, 0);
if (ev != NULL)
spa_event_post(ev);
}
/*
* Evacuates a segment of size at most max_alloc from the vdev
* via repeated calls to spa_vdev_copy_segment. If an allocation
* fails, the pool is probably too fragmented to handle such a
* large size, so decrease max_alloc so that the caller will not try
* this size again this txg.
*/
static void
spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
uint64_t *max_alloc, dmu_tx_t *tx)
{
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
mutex_enter(&svr->svr_lock);
/*
* Determine how big of a chunk to copy. We can allocate up
* to max_alloc bytes, and we can span up to vdev_removal_max_span
* bytes of unallocated space at a time. "segs" will track the
* allocated segments that we are copying. We may also be copying
* free segments (of up to vdev_removal_max_span bytes).
*/
range_tree_t *segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (;;) {
range_tree_t *rt = svr->svr_allocd_segs;
range_seg_t *rs = range_tree_first(rt);
if (rs == NULL)
break;
uint64_t seg_length;
if (range_tree_is_empty(segs)) {
/* need to truncate the first seg based on max_alloc */
seg_length = MIN(rs_get_end(rs, rt) - rs_get_start(rs,
rt), *max_alloc);
} else {
if (rs_get_start(rs, rt) - range_tree_max(segs) >
vdev_removal_max_span) {
/*
* Including this segment would cause us to
* copy a larger unneeded chunk than is allowed.
*/
break;
} else if (rs_get_end(rs, rt) - range_tree_min(segs) >
*max_alloc) {
/*
* This additional segment would extend past
* max_alloc. Rather than splitting this
* segment, leave it for the next mapping.
*/
break;
} else {
seg_length = rs_get_end(rs, rt) -
rs_get_start(rs, rt);
}
}
range_tree_add(segs, rs_get_start(rs, rt), seg_length);
range_tree_remove(svr->svr_allocd_segs,
rs_get_start(rs, rt), seg_length);
}
if (range_tree_is_empty(segs)) {
mutex_exit(&svr->svr_lock);
range_tree_destroy(segs);
return;
}
if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
svr, tx);
}
svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
/*
* Note: this is the amount of *allocated* space
* that we are taking care of each txg.
*/
svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
mutex_exit(&svr->svr_lock);
zio_alloc_list_t zal;
metaslab_trace_init(&zal);
uint64_t thismax = SPA_MAXBLOCKSIZE;
while (!range_tree_is_empty(segs)) {
int error = spa_vdev_copy_segment(vd,
segs, thismax, txg, vca, &zal);
if (error == ENOSPC) {
/*
* Cut our segment in half, and don't try this
* segment size again this txg. Note that the
* allocation size must be aligned to the highest
* ashift in the pool, so that the allocation will
* not be padded out to a multiple of the ashift,
* which could cause us to think that this mapping
* is larger than we intended.
*/
ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
uint64_t attempted =
MIN(range_tree_span(segs), thismax);
thismax = P2ROUNDUP(attempted / 2,
1 << spa->spa_max_ashift);
/*
* The minimum-size allocation can not fail.
*/
ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
*max_alloc = attempted - (1 << spa->spa_max_ashift);
} else {
ASSERT0(error);
/*
* We've performed an allocation, so reset the
* alloc trace list.
*/
metaslab_trace_fini(&zal);
metaslab_trace_init(&zal);
}
}
metaslab_trace_fini(&zal);
range_tree_destroy(segs);
}
/*
* The size of each removal mapping is limited by the tunable
* zfs_remove_max_segment, but we must adjust this to be a multiple of the
* pool's ashift, so that we don't try to split individual sectors regardless
* of the tunable value. (Note that device removal requires that all devices
* have the same ashift, so there's no difference between spa_min_ashift and
* spa_max_ashift.) The raw tunable should not be used elsewhere.
*/
uint64_t
spa_remove_max_segment(spa_t *spa)
{
return (P2ROUNDUP(zfs_remove_max_segment, 1 << spa->spa_max_ashift));
}
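/*
 * A userspace illustration of the rounding above.  EX_P2ROUNDUP is defined
 * here with plain arithmetic purely for the example; with ashift=12
 * (4 KiB sectors) a tunable of 1 MiB + 1 rounds up to the next 4 KiB
 * multiple, 1052672.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_P2ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int
main(void)
{
	uint64_t ashift = 12;
	uint64_t tunable = (1ULL << 20) + 1;	/* 1 MiB + 1 byte */

	printf("%llu\n", (unsigned long long)
	    EX_P2ROUNDUP(tunable, 1ULL << ashift));
	return (0);
}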
/*
* The removal thread operates in open context. It iterates over all
* allocated space in the vdev, by loading each metaslab's spacemap.
* For each contiguous segment of allocated space (capping the segment
* size at SPA_MAXBLOCKSIZE), we:
* - Allocate space for it on another vdev.
* - Create a new mapping from the old location to the new location
* (as a record in svr_new_segments).
* - Initiate a physical read zio to get the data off the removing disk.
* - In the read zio's done callback, initiate a physical write zio to
* write it to the new vdev.
* Note that all of this will take effect when a particular TXG syncs.
* The sync thread ensures that all the phys reads and writes for the syncing
* TXG have completed (see spa_txg_zio) and writes the new mappings to disk
* (see vdev_mapping_sync()).
*/
static void
spa_vdev_remove_thread(void *arg)
{
spa_t *spa = arg;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_copy_arg_t vca;
uint64_t max_alloc = spa_remove_max_segment(spa);
uint64_t last_txg = 0;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_removing);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT(vim != NULL);
mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
vca.vca_outstanding_bytes = 0;
vca.vca_read_error_bytes = 0;
vca.vca_write_error_bytes = 0;
mutex_enter(&svr->svr_lock);
/*
* Start from vim_max_offset so we pick up where we left off
* if we are restarting the removal after opening the pool.
*/
uint64_t msi;
for (msi = start_offset >> vd->vdev_ms_shift;
msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT3U(msi, <=, vd->vdev_ms_count);
ASSERT0(range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Assert nothing in flight -- ms_*tree is empty.
*/
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT0(range_tree_space(msp->ms_allocating[i]));
}
/*
* If the metaslab has ever been allocated from (ms_sm!=NULL),
* read the allocated segments from the space map object
* into svr_allocd_segs. Since we do this while holding
* svr_lock and ms_sync_lock, concurrent frees (which
* would have modified the space map) will wait for us
* to finish loading the spacemap, and then take the
* appropriate action (see free_from_removing_vdev()).
*/
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, svr->svr_allocd_segs);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
/*
* When we are resuming from a paused removal (i.e.
* when importing a pool with a removal in progress),
* discard any state that we have already processed.
*/
range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
}
mutex_exit(&msp->ms_lock);
mutex_exit(&msp->ms_sync_lock);
vca.vca_msp = msp;
zfs_dbgmsg("copying %llu segments for metaslab %llu",
(u_longlong_t)zfs_btree_numnodes(
&svr->svr_allocd_segs->rt_root),
(u_longlong_t)msp->ms_id);
while (!svr->svr_thread_exit &&
!range_tree_is_empty(svr->svr_allocd_segs)) {
mutex_exit(&svr->svr_lock);
/*
* We need to periodically drop the config lock so that
* writers can get in. Additionally, we can't wait
* for a txg to sync while holding a config lock
* (since a waiting writer could cause a 3-way deadlock
* with the sync thread, which also gets a config
* lock for reader). So we can't hold the config lock
* while calling dmu_tx_assign().
*/
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* This delay will pause the removal around the point
* specified by zfs_removal_suspend_progress. We do this
* solely from the test suite or during debugging.
*/
uint64_t bytes_copied =
spa->spa_removing_phys.sr_copied;
for (int i = 0; i < TXG_SIZE; i++)
bytes_copied += svr->svr_bytes_done[i];
while (zfs_removal_suspend_progress &&
!svr->svr_thread_exit)
delay(hz);
mutex_enter(&vca.vca_lock);
while (vca.vca_outstanding_bytes >
zfs_remove_max_copy_bytes) {
cv_wait(&vca.vca_cv, &vca.vca_lock);
}
mutex_exit(&vca.vca_lock);
dmu_tx_t *tx =
dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
/*
* Reacquire the vdev_config lock. The vdev_t
* that we're removing may have changed, e.g. due
* to a vdev_attach or vdev_detach.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd = vdev_lookup_top(spa, svr->svr_vdev_id);
if (txg != last_txg)
max_alloc = spa_remove_max_segment(spa);
last_txg = txg;
spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);
dmu_tx_commit(tx);
mutex_enter(&svr->svr_lock);
}
mutex_enter(&vca.vca_lock);
if (zfs_removal_ignore_errors == 0 &&
(vca.vca_read_error_bytes > 0 ||
vca.vca_write_error_bytes > 0)) {
svr->svr_thread_exit = B_TRUE;
}
mutex_exit(&vca.vca_lock);
}
mutex_exit(&svr->svr_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* Wait for all copies to finish before cleaning up the vca.
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
ASSERT0(vca.vca_outstanding_bytes);
mutex_destroy(&vca.vca_lock);
cv_destroy(&vca.vca_cv);
if (svr->svr_thread_exit) {
mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
svr->svr_thread = NULL;
cv_broadcast(&svr->svr_cv);
mutex_exit(&svr->svr_lock);
/*
* During the removal process an unrecoverable read or write
* error was encountered. The removal process must be
* cancelled or this damage may become permanent.
*/
if (zfs_removal_ignore_errors == 0 &&
(vca.vca_read_error_bytes > 0 ||
vca.vca_write_error_bytes > 0)) {
zfs_dbgmsg("canceling removal due to IO errors: "
"[read_error_bytes=%llu] [write_error_bytes=%llu]",
(u_longlong_t)vca.vca_read_error_bytes,
(u_longlong_t)vca.vca_write_error_bytes);
spa_vdev_remove_cancel_impl(spa);
}
} else {
ASSERT0(range_tree_space(svr->svr_allocd_segs));
vdev_remove_complete(spa);
}
thread_exit();
}
void
spa_vdev_remove_suspend(spa_t *spa)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
if (svr == NULL)
return;
mutex_enter(&svr->svr_lock);
svr->svr_thread_exit = B_TRUE;
while (svr->svr_thread != NULL)
cv_wait(&svr->svr_cv, &svr->svr_lock);
svr->svr_thread_exit = B_FALSE;
mutex_exit(&svr->svr_lock);
}
-/* ARGSUSED */
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
if (spa->spa_vdev_removal == NULL)
return (ENOTACTIVE);
return (0);
}
/*
* Cancel a removal by freeing all entries from the partial mapping
* and marking the vdev as no longer being removing.
*/
-/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
+ (void) arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
objset_t *mos = spa->spa_meta_objset;
ASSERT3P(svr->svr_thread, ==, NULL);
spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
space_map_free(vd->vdev_obsolete_sm, tx);
VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
space_map_close(vd->vdev_obsolete_sm);
vd->vdev_obsolete_sm = NULL;
spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}
for (int i = 0; i < TXG_SIZE; i++) {
ASSERT(list_is_empty(&svr->svr_new_segments[i]));
ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
vdev_indirect_mapping_max_offset(vim));
}
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
break;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
mutex_enter(&msp->ms_lock);
/*
* Assert nothing in flight -- ms_*tree is empty.
*/
for (int i = 0; i < TXG_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_allocating[i]));
for (int i = 0; i < TXG_DEFER_SIZE; i++)
ASSERT0(range_tree_space(msp->ms_defer[i]));
ASSERT0(range_tree_space(msp->ms_freed));
if (msp->ms_sm != NULL) {
mutex_enter(&svr->svr_lock);
VERIFY0(space_map_load(msp->ms_sm,
svr->svr_allocd_segs, SM_ALLOC));
range_tree_walk(msp->ms_unflushed_allocs,
range_tree_add, svr->svr_allocd_segs);
range_tree_walk(msp->ms_unflushed_frees,
range_tree_remove, svr->svr_allocd_segs);
range_tree_walk(msp->ms_freeing,
range_tree_remove, svr->svr_allocd_segs);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for it yet.
*/
uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
uint64_t sm_end = msp->ms_sm->sm_start +
msp->ms_sm->sm_size;
if (sm_end > syncd)
range_tree_clear(svr->svr_allocd_segs,
syncd, sm_end - syncd);
mutex_exit(&svr->svr_lock);
}
mutex_exit(&msp->ms_lock);
mutex_enter(&svr->svr_lock);
range_tree_vacate(svr->svr_allocd_segs,
free_mapped_segment_cb, vd);
mutex_exit(&svr->svr_lock);
}
/*
* Note: this must happen after we invoke free_mapped_segment_cb,
* because it adds to the obsolete_segments.
*/
range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
ASSERT3U(vic->vic_mapping_object, ==,
vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
vd->vdev_indirect_mapping = NULL;
vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
vic->vic_mapping_object = 0;
ASSERT3U(vic->vic_births_object, ==,
vdev_indirect_births_object(vd->vdev_indirect_births));
vdev_indirect_births_close(vd->vdev_indirect_births);
vd->vdev_indirect_births = NULL;
vdev_indirect_births_free(mos, vic->vic_births_object, tx);
vic->vic_births_object = 0;
/*
* We may have processed some frees from the removing vdev in this
* txg, thus increasing svr_bytes_done; discard that here to
* satisfy the assertions in spa_vdev_removal_destroy().
* Note that future txg's can not have any bytes_done, because
* future TXG's are only modified from open context, and we have
* already shut down the copying thread.
*/
svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
spa_finish_removal(spa, DSS_CANCELED, tx);
vd->vdev_removing = B_FALSE;
vdev_config_dirty(vd);
zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
(u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx));
spa_history_log_internal(spa, "vdev remove canceled", tx,
"%s vdev %llu %s", spa_name(spa),
(u_longlong_t)vd->vdev_id,
(vd->vdev_path != NULL) ? vd->vdev_path : "-");
}
static int
spa_vdev_remove_cancel_impl(spa_t *spa)
{
uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;
int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
spa_vdev_remove_cancel_sync, NULL, 0,
ZFS_SPACE_CHECK_EXTRA_RESERVED);
if (error == 0) {
spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
vdev_t *vd = vdev_lookup_top(spa, vdid);
metaslab_group_activate(vd->vdev_mg);
ASSERT(!vd->vdev_islog);
metaslab_group_activate(vd->vdev_log_mg);
spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
}
return (error);
}
int
spa_vdev_remove_cancel(spa_t *spa)
{
spa_vdev_remove_suspend(spa);
if (spa->spa_vdev_removal == NULL)
return (ENOTACTIVE);
return (spa_vdev_remove_cancel_impl(spa));
}
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
if (svr == NULL)
return;
/*
* This check is necessary so that we do not dirty the
* DIRECTORY_OBJECT via spa_sync_removing_state() when there
* is nothing to do. Dirtying it every time would prevent us
* from syncing-to-convergence.
*/
if (svr->svr_bytes_done[txgoff] == 0)
return;
/*
* Update progress accounting.
*/
spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
svr->svr_bytes_done[txgoff] = 0;
spa_sync_removing_state(spa, tx);
}
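/*
 * svr_bytes_done[] above is a small ring indexed by (txg & TXG_MASK).  A
 * standalone sketch of that indexing, with the TXG constants redefined
 * locally purely for the example (the real ones live in the ZFS headers):
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_TXG_SIZE	4
#define	EX_TXG_MASK	(EX_TXG_SIZE - 1)

int
main(void)
{
	uint64_t bytes_done[EX_TXG_SIZE] = { 0 };

	/* open context: charge work against the txg it was assigned to */
	bytes_done[100 & EX_TXG_MASK] += 8192;	/* slot 0 */
	bytes_done[101 & EX_TXG_MASK] += 4096;	/* slot 1 */

	/* syncing context for txg 100: consume and clear that slot */
	uint64_t txg = 100;
	printf("txg %llu copied %llu bytes\n", (unsigned long long)txg,
	    (unsigned long long)bytes_done[txg & EX_TXG_MASK]);
	bytes_done[txg & EX_TXG_MASK] = 0;
	return (0);
}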
static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
uint64_t id = vd->vdev_id;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
vdev_free(vd);
vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
vdev_add_child(rvd, vd);
vdev_config_dirty(rvd);
/*
* Reassess the health of our root vdev.
*/
vdev_reopen(rvd);
}
/*
* Remove a log device. The config lock is held for the specified TXG.
*/
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
metaslab_group_t *mg = vd->vdev_mg;
spa_t *spa = vd->vdev_spa;
int error = 0;
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT3P(vd->vdev_log_mg, ==, NULL);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Stop allocating from this vdev.
*/
metaslab_group_passivate(mg);
/*
* Wait for the youngest allocations and frees to sync,
* and then wait for the deferral of those frees to finish.
*/
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
/*
* Cancel any initialize or TRIM which was in progress.
*/
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
vdev_autotrim_stop_wait(vd);
/*
* Evacuate the device. We don't hold the config lock as
* writer since we need to do I/O but we do keep the
* spa_namespace_lock held. Once this completes the device
* should no longer have any blocks allocated on it.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (vd->vdev_stat.vs_alloc != 0)
error = spa_reset_logs(spa);
*txg = spa_vdev_config_enter(spa);
if (error != 0) {
metaslab_group_activate(mg);
ASSERT3P(vd->vdev_log_mg, ==, NULL);
return (error);
}
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* The evacuation succeeded. Remove any remaining MOS metadata
* associated with this vdev, and wait for these changes to sync.
*/
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
/*
* When the log space map feature is enabled we look at
* the vdev's top_zap to find the on-disk flush data of
* the metaslab we just flushed. Thus, while removing a
* log vdev we make sure to call vdev_metaslab_fini()
* first, which removes all metaslabs of this vdev from
* spa_metaslabs_by_flushed before vdev_remove_empty()
* destroys the top_zap of this log vdev.
*
* This avoids the scenario where we flush a metaslab
* from the log vdev being removed that doesn't have a
* top_zap and end up failing to lookup its on-disk flush
* data.
*
* We don't call metaslab_group_destroy() right away
* though (it will be called in vdev_free() later) as
* during metaslab_sync() of metaslabs from other vdevs
* we may touch the metaslab group of this vdev through
* metaslab_class_histogram_verify().
*/
vdev_metaslab_fini(vd);
spa_log_sm_set_blocklimit(spa);
spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
*txg = spa_vdev_config_enter(spa);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/* The top ZAP should have been destroyed by vdev_remove_empty. */
ASSERT0(vd->vdev_top_zap);
/* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
ASSERT0(vd->vdev_leaf_zap);
(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
if (list_link_active(&vd->vdev_state_dirty_node))
vdev_state_clean(vd);
if (list_link_active(&vd->vdev_config_dirty_node))
vdev_config_clean(vd);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Clean up the vdev namespace.
*/
vdev_remove_make_hole_and_free(vd);
if (ev != NULL)
spa_event_post(ev);
return (0);
}
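/*
 * Check whether the given top-level vdev is eligible for device removal.
 * Returns 0 if removal may proceed, or an errno describing why it cannot
 * (unsupported layout, insufficient free space, removal already in
 * progress, missing data, unreadable device, or mismatched ashift).
 */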
static int
spa_vdev_remove_top_check(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
if (vd != vd->vdev_top)
return (SET_ERROR(ENOTSUP));
if (!vdev_is_concrete(vd))
return (SET_ERROR(ENOTSUP));
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
return (SET_ERROR(ENOTSUP));
metaslab_class_t *mc = vd->vdev_mg->mg_class;
metaslab_class_t *normal = spa_normal_class(spa);
if (mc != normal) {
/*
* Space allocated from the special (or dedup) class is
* included in the DMU's space usage, but it's not included
* in spa_dspace (or dsl_pool_adjustedsize()). Therefore
* there is always at least as much free space in the normal
* class as is allocated from the special (and dedup) class.
* As a backup check, we will return ENOSPC if this is
* violated. See also spa_update_dspace().
*/
uint64_t available = metaslab_class_get_space(normal) -
metaslab_class_get_alloc(normal);
ASSERT3U(available, >=, vd->vdev_stat.vs_alloc);
if (available < vd->vdev_stat.vs_alloc)
return (SET_ERROR(ENOSPC));
} else {
/* available space in the pool's normal class */
uint64_t available = dsl_dir_space_available(
spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
if (available <
vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
/*
* This is a normal device. There has to be enough free
* space to remove the device and leave double the
* "slop" space (i.e. we must leave at least 3% of the
* pool free, in addition to the normal slop space).
*/
return (SET_ERROR(ENOSPC));
}
}
/*
* There cannot be a removal in progress.
*/
if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
return (SET_ERROR(EBUSY));
/*
* The device must have all its data.
*/
if (!vdev_dtl_empty(vd, DTL_MISSING) ||
!vdev_dtl_empty(vd, DTL_OUTAGE))
return (SET_ERROR(EBUSY));
/*
* The device must be healthy.
*/
if (!vdev_readable(vd))
return (SET_ERROR(EIO));
/*
* All vdevs in the normal class must have the same ashift.
*/
if (spa->spa_max_ashift != spa->spa_min_ashift) {
return (SET_ERROR(EINVAL));
}
/*
* A removed special/dedup vdev must have same ashift as normal class.
*/
ASSERT(!vd->vdev_islog);
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
vd->vdev_ashift != spa->spa_max_ashift) {
return (SET_ERROR(EINVAL));
}
/*
* All vdevs in the normal class must have the same ashift
* and must not be raidz or draid.
*/
vdev_t *rvd = spa->spa_root_vdev;
int num_indirect = 0;
for (uint64_t id = 0; id < rvd->vdev_children; id++) {
vdev_t *cvd = rvd->vdev_child[id];
/*
* A removed special/dedup vdev must have the same ashift
* across all vdevs in its class.
*/
if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
cvd->vdev_alloc_bias == vd->vdev_alloc_bias &&
cvd->vdev_ashift != vd->vdev_ashift) {
return (SET_ERROR(EINVAL));
}
if (cvd->vdev_ashift != 0 &&
cvd->vdev_alloc_bias == VDEV_BIAS_NONE)
ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
if (cvd->vdev_ops == &vdev_indirect_ops)
num_indirect++;
if (!vdev_is_concrete(cvd))
continue;
if (vdev_get_nparity(cvd) != 0)
return (SET_ERROR(EINVAL));
/*
* The mirror must be a mirror of leaf vdevs only.
*/
if (cvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < cvd->vdev_children; cid++) {
if (!cvd->vdev_child[cid]->vdev_ops->
vdev_op_leaf)
return (SET_ERROR(EINVAL));
}
}
}
return (0);
}
/*
* Initiate removal of a top-level vdev, reducing the total space in the pool.
* The config lock is held for the specified TXG. Once initiated,
* evacuation of all allocated space (copying it to other vdevs) happens
* in the background (see spa_vdev_remove_thread()), and can be canceled
* (see spa_vdev_remove_cancel()). If successful, the vdev will
* be transformed to an indirect vdev (see spa_vdev_remove_complete()).
*/
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
spa_t *spa = vd->vdev_spa;
int error;
/*
* Check for errors up-front, so that we don't waste time
* passivating the metaslab group and clearing the ZIL if there
* are errors.
*/
error = spa_vdev_remove_top_check(vd);
if (error != 0)
return (error);
/*
* Stop allocating from this vdev. Note that we must check
* that this is not the only device in the pool before
* passivating; otherwise we will not be able to make
* progress because we can't allocate from any vdevs.
* The above check for sufficient free space serves this
* purpose.
*/
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_passivate(mg);
ASSERT(!vd->vdev_islog);
metaslab_group_passivate(vd->vdev_log_mg);
/*
* Wait for the youngest allocations and frees to sync,
* and then wait for the deferral of those frees to finish.
*/
spa_vdev_config_exit(spa, NULL,
*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
/*
* We must ensure that no "stubby" log blocks are allocated
* on the device to be removed. These blocks could be
* written at any time, including while we are in the middle
* of copying them.
*/
error = spa_reset_logs(spa);
/*
* We stop any initializing and TRIM that is currently in progress
* but leave the state as "active". This will allow the process to
* resume if the removal is canceled sometime later.
*/
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_wait(vd);
*txg = spa_vdev_config_enter(spa);
/*
* Things might have changed while the config lock was dropped
* (e.g. space usage). Check for errors again.
*/
if (error == 0)
error = spa_vdev_remove_top_check(vd);
if (error != 0) {
metaslab_group_activate(mg);
ASSERT(!vd->vdev_islog);
metaslab_group_activate(vd->vdev_log_mg);
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
return (error);
}
vd->vdev_removing = B_TRUE;
vdev_dirty_leaves(vd, VDD_DTL, *txg);
vdev_config_dirty(vd);
dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
dsl_sync_task_nowait(spa->spa_dsl_pool,
vdev_remove_initiate_sync, (void *)(uintptr_t)vd->vdev_id, tx);
dmu_tx_commit(tx);
return (0);
}
/*
* Remove a device from the pool.
*
* Removing a device from the vdev namespace requires several steps
* and can take a significant amount of time. As a result we use
* the spa_vdev_config_[enter/exit] functions which allow us to
* grab and release the spa_config_lock while still holding the namespace
* lock. During each step the configuration is synced out.
*/
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
vdev_t *vd;
nvlist_t **spares, **l2cache, *nv;
uint64_t txg = 0;
uint_t nspares, nl2cache;
int error = 0, error_log;
boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
sysevent_t *ev = NULL;
char *vd_type = NULL, *vd_path = NULL;
ASSERT(spa_writeable(spa));
if (!locked)
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
if (!locked)
return (spa_vdev_exit(spa, NULL, txg, error));
return (error);
}
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (spa->spa_spares.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
(nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
/*
* Only remove the hot spare if it's not currently in use
* in this pool.
*/
if (vd == NULL || unspare) {
char *type;
boolean_t draid_spare = B_FALSE;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type)
== 0 && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
draid_spare = B_TRUE;
if (vd == NULL && draid_spare) {
error = SET_ERROR(ENOTSUP);
} else {
if (vd == NULL)
vd = spa_lookup_by_guid(spa,
guid, B_TRUE);
ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_AUX);
vd_type = VDEV_TYPE_SPARE;
vd_path = spa_strdup(fnvlist_lookup_string(
nv, ZPOOL_CONFIG_PATH));
spa_vdev_remove_aux(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares, nv);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
} else {
error = SET_ERROR(EBUSY);
}
} else if (spa->spa_l2cache.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
(nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
vd_type = VDEV_TYPE_L2CACHE;
vd_path = spa_strdup(fnvlist_lookup_string(
nv, ZPOOL_CONFIG_PATH));
/*
* Cache devices can always be removed.
*/
vd = spa_lookup_by_guid(spa, guid, B_TRUE);
/*
* Stop trimming the cache device. We need to release the
* config lock to allow the syncing of TRIM transactions
* without releasing the spa_namespace_lock. The same
* strategy is employed in spa_vdev_remove_top().
*/
spa_vdev_config_exit(spa, NULL,
txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
mutex_exit(&vd->vdev_trim_lock);
txg = spa_vdev_config_enter(spa);
ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
} else if (vd != NULL && vd->vdev_islog) {
ASSERT(!locked);
vd_type = VDEV_TYPE_LOG;
vd_path = spa_strdup((vd->vdev_path != NULL) ?
vd->vdev_path : "-");
error = spa_vdev_remove_log(vd, &txg);
} else if (vd != NULL) {
ASSERT(!locked);
error = spa_vdev_remove_top(vd, &txg);
} else {
/*
* There is no vdev of any kind with the specified guid.
*/
error = SET_ERROR(ENOENT);
}
error_log = error;
if (!locked)
error = spa_vdev_exit(spa, NULL, txg, error);
/*
* Logging must be done outside the spa config lock. Otherwise,
* this code path could end up holding the spa config lock while
* waiting for a txg_sync so it can write to the internal log.
* Doing that would prevent the txg sync from actually happening,
* causing a deadlock.
*/
if (error_log == 0 && vd_type != NULL && vd_path != NULL) {
spa_history_log_internal(spa, "vdev remove", NULL,
"%s vdev (%s) %s", spa_name(spa), vd_type, vd_path);
}
if (vd_path != NULL)
spa_strfree(vd_path);
if (ev != NULL)
spa_event_post(ev);
return (error);
}
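/*
 * Copy the pool's current removal state and progress into *prs, including
 * the memory consumed by the indirect mappings of previously removed vdevs.
 */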
int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
prs->prs_state = spa->spa_removing_phys.sr_state;
if (prs->prs_state == DSS_NONE)
return (SET_ERROR(ENOENT));
prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
prs->prs_copied = spa->spa_removing_phys.sr_copied;
prs->prs_mapping_memory = 0;
uint64_t indirect_vdev_id =
spa->spa_removing_phys.sr_prev_indirect_vdev;
while (indirect_vdev_id != -1) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
indirect_vdev_id = vic->vic_prev_indirect_vdev;
}
return (0);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW,
"Ignore hard IO errors when removing device");
ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, INT, ZMOD_RW,
"Largest contiguous segment to allocate when removing device");
ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, INT, ZMOD_RW,
"Largest span of free chunks a remap segment can span");
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, INT, ZMOD_RW,
"Pause device removal after this many bytes are copied "
"(debug use only - causes removal to hang)");
/* END CSTYLED */
EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);
EXPORT_SYMBOL(spa_remove_init);
EXPORT_SYMBOL(spa_restart_removal);
EXPORT_SYMBOL(spa_vdev_removal_destroy);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_remove_cancel);
EXPORT_SYMBOL(spa_vdev_remove_suspend);
EXPORT_SYMBOL(svr_sync);
diff --git a/sys/contrib/openzfs/module/zfs/vdev_trim.c b/sys/contrib/openzfs/module/zfs/vdev_trim.c
index deea7fedd770..2bae33b2b532 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_trim.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_trim.c
@@ -1,1726 +1,1727 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016 by Delphix. All rights reserved.
* Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
* Copyright (c) 2021 Hewlett Packard Enterprise Development LP
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc_impl.h>
/*
* TRIM is a feature which is used to notify an SSD that some previously
* written space is no longer allocated by the pool. This is useful because
* writes to an SSD must be performed to blocks which have first been erased.
* Ensuring the SSD always has a supply of erased blocks for new writes
* helps prevent performance from deteriorating.
*
* There are two supported TRIM methods; manual and automatic.
*
* Manual TRIM:
*
* A manual TRIM is initiated by running the 'zpool trim' command. A single
* 'vdev_trim' thread is created for each leaf vdev, and it is responsible for
* managing that vdev's TRIM process. This involves iterating over all the
* metaslabs, calculating the unallocated space ranges, and then issuing the
* required TRIM I/Os.
*
* While a metaslab is being actively trimmed it is not eligible to perform
* new allocations. After traversing all of the metaslabs the thread is
* terminated. Finally, both the requested options and current progress of
* the TRIM are regularly written to the pool. This allows the TRIM to be
* suspended and resumed as needed.
*
* Automatic TRIM:
*
* An automatic TRIM is enabled by setting the 'autotrim' pool property
* to 'on'. When enabled, a `vdev_autotrim' thread is created for each
* top-level (not leaf) vdev in the pool. These threads perform the same
* core TRIM process as a manual TRIM, but with a few key differences.
*
* 1) Automatic TRIM happens continuously in the background and operates
* solely on recently freed blocks (ms_trim not ms_allocatable).
*
* 2) Each thread is associated with a top-level (not leaf) vdev. This has
* the benefit of simplifying the threading model: it makes it easier
* to coordinate administrative commands, and it ensures only a single
* metaslab is disabled at a time. Unlike manual TRIM, this means each
* 'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
* children.
*
* 3) There is no automatic TRIM progress information stored on disk, nor
* is it reported by 'zpool status'.
*
* While the automatic TRIM process is highly effective, it is more likely
* than a manual TRIM to encounter tiny ranges. Ranges less than or equal to
* 'zfs_trim_extent_bytes_min' (32k) are considered too small to efficiently
* TRIM and are skipped. This means small amounts of freed space may not
* be automatically trimmed.
*
* Furthermore, devices with attached hot spares and devices being actively
* replaced are skipped. This is done to avoid adding additional stress to
* a potentially unhealthy device and to minimize the required rebuild time.
*
* For this reason it may be beneficial to occasionally manually TRIM a pool
* even when automatic TRIM is enabled.
*/
/*
* Maximum size of a TRIM I/O; larger ranges will be chunked into 128 MiB lengths.
*/
unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024;
/*
* Minimum size of a TRIM I/O; extents smaller than 32 KiB will be skipped.
*/
unsigned int zfs_trim_extent_bytes_min = 32 * 1024;
/*
* Skip uninitialized metaslabs during the TRIM process. This option is
* useful for pools constructed from large thinly-provisioned devices where
* TRIM operations are slow. As a pool ages, an increasing fraction of
* the pool's metaslabs will be initialized, progressively degrading the
* usefulness of this option. This setting is stored when starting a
* manual TRIM and will persist for the duration of the requested TRIM.
*/
unsigned int zfs_trim_metaslab_skip = 0;
/*
* Maximum number of queued TRIM I/Os per leaf vdev. The number of
* concurrent TRIM I/Os issued to the device is controlled by the
* zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options.
*/
unsigned int zfs_trim_queue_limit = 10;
/*
* The minimum number of transaction groups between automatic trims of a
* metaslab. This setting represents a trade-off between issuing more
* efficient TRIM operations, by allowing them to be aggregated longer,
* and issuing them promptly so the trimmed space is available. Note
* that this value is a minimum; metaslabs can be trimmed less frequently
* when there are a large number of ranges which need to be trimmed.
*
* Increasing this value will allow frees to be aggregated for a longer
* time. This can result in larger TRIM operations, and increased memory
* usage in order to track the ranges to be trimmed. Decreasing this value
* has the opposite effect. The default value of 32 was determined through
* testing to be a reasonable compromise.
*/
unsigned int zfs_trim_txg_batch = 32;
/*
* trim_args is a control structure which describes how a leaf vdev
* should be trimmed. The core elements are the vdev, the metaslab being
* trimmed and a range tree containing the extents to TRIM. All provided
* ranges must be within the metaslab.
*/
typedef struct trim_args {
/*
* These fields are set by the caller of vdev_trim_ranges().
*/
vdev_t *trim_vdev; /* Leaf vdev to TRIM */
metaslab_t *trim_msp; /* Disabled metaslab */
range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
trim_type_t trim_type; /* Manual or auto TRIM */
uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */
uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */
enum trim_flag trim_flags; /* TRIM flags (secure) */
/*
* These fields are updated by vdev_trim_ranges().
*/
hrtime_t trim_start_time; /* Start time */
uint64_t trim_bytes_done; /* Bytes trimmed */
} trim_args_t;
/*
* Determines whether a vdev_trim_thread() should be stopped.
*/
static boolean_t
vdev_trim_should_stop(vdev_t *vd)
{
return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
vd->vdev_detached || vd->vdev_top->vdev_removing);
}
/*
* Determines whether a vdev_autotrim_thread() should be stopped.
*/
static boolean_t
vdev_autotrim_should_stop(vdev_t *tvd)
{
return (tvd->vdev_autotrim_exit_wanted ||
!vdev_writeable(tvd) || tvd->vdev_removing ||
spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
}
/*
* The sync task for updating the on-disk state of a manual TRIM. This
* is scheduled by vdev_trim_change_state().
*/
static void
vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
{
/*
* We pass in the guid instead of the vdev_t since the vdev may
* have been freed prior to the sync task being processed. This
* happens when a vdev is detached as we call spa_config_vdev_exit(),
* stop the trimming thread, schedule the sync task, and free
* the vdev. Later when the scheduled sync task is invoked, it would
* find that the vdev has been freed.
*/
uint64_t guid = *(uint64_t *)arg;
uint64_t txg = dmu_tx_get_txg(tx);
kmem_free(arg, sizeof (uint64_t));
vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
return;
uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
vd->vdev_trim_offset[txg & TXG_MASK] = 0;
VERIFY3U(vd->vdev_leaf_zap, !=, 0);
objset_t *mos = vd->vdev_spa->spa_meta_objset;
if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) {
if (vd->vdev_trim_last_offset == UINT64_MAX)
last_offset = 0;
vd->vdev_trim_last_offset = last_offset;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
sizeof (last_offset), 1, &last_offset, tx));
}
if (vd->vdev_trim_action_time > 0) {
uint64_t val = (uint64_t)vd->vdev_trim_action_time;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_ACTION_TIME, sizeof (val),
1, &val, tx));
}
if (vd->vdev_trim_rate > 0) {
uint64_t rate = (uint64_t)vd->vdev_trim_rate;
if (rate == UINT64_MAX)
rate = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
VDEV_LEAF_ZAP_TRIM_RATE, sizeof (rate), 1, &rate, tx));
}
uint64_t partial = vd->vdev_trim_partial;
if (partial == UINT64_MAX)
partial = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
sizeof (partial), 1, &partial, tx));
uint64_t secure = vd->vdev_trim_secure;
if (secure == UINT64_MAX)
secure = 0;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
sizeof (secure), 1, &secure, tx));
uint64_t trim_state = vd->vdev_trim_state;
VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
sizeof (trim_state), 1, &trim_state, tx));
}
/*
* Update the on-disk state of a manual TRIM. This is called to request
* that a TRIM be started/suspended/canceled, or to change one of the
* TRIM options (partial, secure, rate).
*/
static void
vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
uint64_t rate, boolean_t partial, boolean_t secure)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
spa_t *spa = vd->vdev_spa;
if (new_state == vd->vdev_trim_state)
return;
/*
* Copy the vd's guid, this will be freed by the sync task.
*/
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/*
* If we're suspending, then preserve the original start time.
*/
if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) {
vd->vdev_trim_action_time = gethrestime_sec();
}
/*
* If we're activating, then preserve the requested rate and trim
* method. Setting the last offset and rate to UINT64_MAX is used
* as a sentinel to indicate they should be reset to default values.
*/
if (new_state == VDEV_TRIM_ACTIVE) {
if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE ||
vd->vdev_trim_state == VDEV_TRIM_CANCELED) {
vd->vdev_trim_last_offset = UINT64_MAX;
vd->vdev_trim_rate = UINT64_MAX;
vd->vdev_trim_partial = UINT64_MAX;
vd->vdev_trim_secure = UINT64_MAX;
}
if (rate != 0)
vd->vdev_trim_rate = rate;
if (partial != 0)
vd->vdev_trim_partial = partial;
if (secure != 0)
vd->vdev_trim_secure = secure;
}
vdev_trim_state_t old_state = vd->vdev_trim_state;
boolean_t resumed = (old_state == VDEV_TRIM_SUSPENDED);
vd->vdev_trim_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
guid, tx);
switch (new_state) {
case VDEV_TRIM_ACTIVE:
spa_event_notify(spa, vd, NULL,
resumed ? ESC_ZFS_TRIM_RESUME : ESC_ZFS_TRIM_START);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s activated", vd->vdev_path);
break;
case VDEV_TRIM_SUSPENDED:
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_SUSPEND);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s suspended", vd->vdev_path);
break;
case VDEV_TRIM_CANCELED:
if (old_state == VDEV_TRIM_ACTIVE ||
old_state == VDEV_TRIM_SUSPENDED) {
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_CANCEL);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s canceled", vd->vdev_path);
}
break;
case VDEV_TRIM_COMPLETE:
spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_FINISH);
spa_history_log_internal(spa, "trim", tx,
"vdev=%s complete", vd->vdev_path);
break;
default:
panic("invalid state %llu", (unsigned long long)new_state);
}
dmu_tx_commit(tx);
if (new_state != VDEV_TRIM_ACTIVE)
spa_notify_waiters(spa);
}
/*
* The zio_done_func_t done callback for each manual TRIM issued. It is
* responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
* and limiting the number of in flight TRIM I/Os.
*/
static void
vdev_trim_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
/*
* The I/O failed because the vdev was unavailable; roll the
* last offset back. (This works because spa_sync waits on
* spa_txg_zio before it runs sync tasks.)
*/
uint64_t *offset =
&vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
*offset = MIN(*offset, zio->io_offset);
} else {
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
1, zio->io_orig_size, 0, 0, 0, 0);
}
vd->vdev_trim_bytes_done += zio->io_orig_size;
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* The zio_done_func_t done callback for each automatic TRIM issued. It
* is responsible for updating the TRIM stats and limiting the number of
* in flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
* never reissued on failure.
*/
static void
vdev_autotrim_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
1, zio->io_orig_size, 0, 0, 0, 0);
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* The zio_done_func_t done callback for each TRIM issued via
* vdev_trim_simple(). It is responsible for updating the TRIM stats and
* limiting the number of in flight TRIM I/Os. Simple TRIM I/Os are best
* effort and are never reissued on failure.
*/
static void
vdev_trim_simple_cb(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
mutex_enter(&vd->vdev_trim_io_lock);
if (zio->io_error != 0) {
vd->vdev_stat.vs_trim_errors++;
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
0, 0, 0, 0, 1, zio->io_orig_size);
} else {
spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_SIMPLE,
1, zio->io_orig_size, 0, 0, 0, 0);
}
ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE], >, 0);
vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE]--;
cv_broadcast(&vd->vdev_trim_io_cv);
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
* Returns the average trim rate in bytes/sec for the ta->trim_vdev.
*/
static uint64_t
vdev_trim_calculate_rate(trim_args_t *ta)
{
return (ta->trim_bytes_done * 1000 /
(NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1));
}
/*
* Issues a physical TRIM and takes care of rate limiting (bytes/sec)
* and number of concurrent TRIM I/Os.
*/
static int
vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
{
vdev_t *vd = ta->trim_vdev;
spa_t *spa = vd->vdev_spa;
void *cb;
mutex_enter(&vd->vdev_trim_io_lock);
/*
* Limit manual TRIM I/Os to the requested rate. This does not
* apply to automatic TRIM since no per-vdev rate can be specified.
*/
if (ta->trim_type == TRIM_TYPE_MANUAL) {
while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) &&
vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) {
cv_timedwait_idle(&vd->vdev_trim_io_cv,
&vd->vdev_trim_io_lock, ddi_get_lbolt() +
MSEC_TO_TICK(10));
}
}
ta->trim_bytes_done += size;
/* Limit in flight trimming I/Os */
while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] +
vd->vdev_trim_inflight[2] >= zfs_trim_queue_limit) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
vd->vdev_trim_inflight[ta->trim_type]++;
mutex_exit(&vd->vdev_trim_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
mutex_enter(&vd->vdev_trim_lock);
if (ta->trim_type == TRIM_TYPE_MANUAL &&
vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
*guid = vd->vdev_guid;
/* This is the first write of this txg. */
dsl_sync_task_nowait(spa_get_dsl(spa),
vdev_trim_zap_update_sync, guid, tx);
}
/*
* We know the vdev_t will still be around since all consumers of
* vdev_free must stop the trimming first.
*/
if ((ta->trim_type == TRIM_TYPE_MANUAL &&
vdev_trim_should_stop(vd)) ||
(ta->trim_type == TRIM_TYPE_AUTO &&
vdev_autotrim_should_stop(vd->vdev_top))) {
mutex_enter(&vd->vdev_trim_io_lock);
vd->vdev_trim_inflight[ta->trim_type]--;
mutex_exit(&vd->vdev_trim_io_lock);
spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
mutex_exit(&vd->vdev_trim_lock);
dmu_tx_commit(tx);
return (SET_ERROR(EINTR));
}
mutex_exit(&vd->vdev_trim_lock);
if (ta->trim_type == TRIM_TYPE_MANUAL)
vd->vdev_trim_offset[txg & TXG_MASK] = start + size;
if (ta->trim_type == TRIM_TYPE_MANUAL) {
cb = vdev_trim_cb;
} else if (ta->trim_type == TRIM_TYPE_AUTO) {
cb = vdev_autotrim_cb;
} else {
cb = vdev_trim_simple_cb;
}
zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
start, size, cb, NULL, ZIO_PRIORITY_TRIM, ZIO_FLAG_CANFAIL,
ta->trim_flags));
/* vdev_trim_cb and vdev_autotrim_cb release SCL_STATE_ALL */
dmu_tx_commit(tx);
return (0);
}
/*
* Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
* Additional parameters describing how the TRIM should be performed must
* be set in the trim_args structure. See the trim_args definition for
* additional information.
*/
static int
vdev_trim_ranges(trim_args_t *ta)
{
vdev_t *vd = ta->trim_vdev;
zfs_btree_t *t = &ta->trim_tree->rt_root;
zfs_btree_index_t idx;
uint64_t extent_bytes_max = ta->trim_extent_bytes_max;
uint64_t extent_bytes_min = ta->trim_extent_bytes_min;
spa_t *spa = vd->vdev_spa;
ta->trim_start_time = gethrtime();
ta->trim_bytes_done = 0;
for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
rs = zfs_btree_next(t, &idx, &idx)) {
uint64_t size = rs_get_end(rs, ta->trim_tree) - rs_get_start(rs,
ta->trim_tree);
if (extent_bytes_min && size < extent_bytes_min) {
spa_iostats_trim_add(spa, ta->trim_type,
0, 0, 1, size, 0, 0);
continue;
}
/* Split range into legally-sized physical chunks */
uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1;
for (uint64_t w = 0; w < writes_required; w++) {
int error;
error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
rs_get_start(rs, ta->trim_tree) +
(w * extent_bytes_max), MIN(size -
(w * extent_bytes_max), extent_bytes_max));
if (error != 0) {
return (error);
}
}
}
return (0);
}
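/*
 * vdev_xlate_walk() callback which records the largest physical rs_end
 * seen while translating a logical range.
 */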
static void
vdev_trim_xlate_last_rs_end(void *arg, range_seg64_t *physical_rs)
{
uint64_t *last_rs_end = (uint64_t *)arg;
if (physical_rs->rs_end > *last_rs_end)
*last_rs_end = physical_rs->rs_end;
}
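/*
 * vdev_xlate_walk() callback which accumulates the estimated and completed
 * TRIM byte counts for the physical ranges of a metaslab.
 */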
static void
vdev_trim_xlate_progress(void *arg, range_seg64_t *physical_rs)
{
vdev_t *vd = (vdev_t *)arg;
uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
vd->vdev_trim_bytes_est += size;
if (vd->vdev_trim_last_offset >= physical_rs->rs_end) {
vd->vdev_trim_bytes_done += size;
} else if (vd->vdev_trim_last_offset > physical_rs->rs_start &&
vd->vdev_trim_last_offset <= physical_rs->rs_end) {
vd->vdev_trim_bytes_done +=
vd->vdev_trim_last_offset - physical_rs->rs_start;
}
}
/*
* Calculates the completion percentage of a manual TRIM.
*/
static void
vdev_trim_calculate_progress(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
vd->vdev_trim_bytes_est = 0;
vd->vdev_trim_bytes_done = 0;
for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
mutex_enter(&msp->ms_lock);
uint64_t ms_free = (msp->ms_size -
metaslab_allocated_space(msp)) /
vdev_get_ndisks(vd->vdev_top);
/*
* Convert the metaslab range to a physical range
* on our vdev. We use this to determine if we are
* in the middle of this metaslab range.
*/
range_seg64_t logical_rs, physical_rs, remain_rs;
logical_rs.rs_start = msp->ms_start;
logical_rs.rs_end = msp->ms_start + msp->ms_size;
/* Metaslab space after this offset has not been trimmed. */
vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
if (vd->vdev_trim_last_offset <= physical_rs.rs_start) {
vd->vdev_trim_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/* Metaslab space before this offset has been trimmed */
uint64_t last_rs_end = physical_rs.rs_end;
if (!vdev_xlate_is_empty(&remain_rs)) {
vdev_xlate_walk(vd, &remain_rs,
vdev_trim_xlate_last_rs_end, &last_rs_end);
}
if (vd->vdev_trim_last_offset > last_rs_end) {
vd->vdev_trim_bytes_done += ms_free;
vd->vdev_trim_bytes_est += ms_free;
mutex_exit(&msp->ms_lock);
continue;
}
/*
* If we get here, we're in the middle of trimming this
* metaslab. Load it and walk the free tree for more
* accurate progress estimation.
*/
VERIFY0(metaslab_load(msp));
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *bt = &rt->rt_root;
zfs_btree_index_t idx;
for (range_seg_t *rs = zfs_btree_first(bt, &idx);
rs != NULL; rs = zfs_btree_next(bt, &idx, &idx)) {
logical_rs.rs_start = rs_get_start(rs, rt);
logical_rs.rs_end = rs_get_end(rs, rt);
vdev_xlate_walk(vd, &logical_rs,
vdev_trim_xlate_progress, vd);
}
mutex_exit(&msp->ms_lock);
}
}
/*
* Load from disk the vdev's manual TRIM information. This includes the
* state, progress, and options provided when initiating the manual TRIM.
*/
static int
vdev_trim_load(vdev_t *vd)
{
int err = 0;
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
ASSERT(vd->vdev_leaf_zap != 0);
if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE ||
vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
sizeof (vd->vdev_trim_last_offset), 1,
&vd->vdev_trim_last_offset);
if (err == ENOENT) {
vd->vdev_trim_last_offset = 0;
err = 0;
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE,
sizeof (vd->vdev_trim_rate), 1,
&vd->vdev_trim_rate);
if (err == ENOENT) {
vd->vdev_trim_rate = 0;
err = 0;
}
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
sizeof (vd->vdev_trim_partial), 1,
&vd->vdev_trim_partial);
if (err == ENOENT) {
vd->vdev_trim_partial = 0;
err = 0;
}
}
if (err == 0) {
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
sizeof (vd->vdev_trim_secure), 1,
&vd->vdev_trim_secure);
if (err == ENOENT) {
vd->vdev_trim_secure = 0;
err = 0;
}
}
}
vdev_trim_calculate_progress(vd);
return (err);
}
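/*
 * vdev_xlate_walk() callback which adds translated physical ranges to the
 * trim tree, skipping segments a manual TRIM has already visited.
 */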
static void
vdev_trim_xlate_range_add(void *arg, range_seg64_t *physical_rs)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
/*
* Only a manual trim will be traversing the vdev sequentially.
* For an auto trim all valid ranges should be added.
*/
if (ta->trim_type == TRIM_TYPE_MANUAL) {
/* Only add segments that we have not visited yet */
if (physical_rs->rs_end <= vd->vdev_trim_last_offset)
return;
/* Pick up where we left off mid-range. */
if (vd->vdev_trim_last_offset > physical_rs->rs_start) {
ASSERT3U(physical_rs->rs_end, >,
vd->vdev_trim_last_offset);
physical_rs->rs_start = vd->vdev_trim_last_offset;
}
}
ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);
range_tree_add(ta->trim_tree, physical_rs->rs_start,
physical_rs->rs_end - physical_rs->rs_start);
}
/*
* Convert the logical range into physical ranges and add them to the
* range tree passed in the trim_args_t.
*/
static void
vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
vdev_t *vd = ta->trim_vdev;
range_seg64_t logical_rs;
logical_rs.rs_start = start;
logical_rs.rs_end = start + size;
/*
* Every range to be trimmed must be part of ms_allocatable.
* When ZFS_DEBUG_TRIM is set load the metaslab to verify this
* is always the case.
*/
if (zfs_flags & ZFS_DEBUG_TRIM) {
metaslab_t *msp = ta->trim_msp;
VERIFY0(metaslab_load(msp));
VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
}
ASSERT(vd->vdev_ops->vdev_op_leaf);
vdev_xlate_walk(vd, &logical_rs, vdev_trim_xlate_range_add, arg);
}
/*
* Each manual TRIM thread is responsible for trimming the unallocated
* space of a single leaf vdev. This is accomplished by sequentially iterating
* over its top-level metaslabs and issuing TRIM I/O for the space described
* by its ms_allocatable. While a metaslab is undergoing trimming it is
* not eligible for new allocations.
*/
static void
vdev_trim_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
trim_args_t ta;
int error = 0;
/*
* The VDEV_LEAF_ZAP_TRIM_* entries may have been updated by
* vdev_trim(). Wait for the updated values to be reflected
* in the zap in order to start with the requested settings.
*/
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_trim_last_offset = 0;
vd->vdev_trim_rate = 0;
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
VERIFY0(vdev_trim_load(vd));
ta.trim_vdev = vd;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_flags = 0;
/*
* When a secure TRIM has been requested infer that the intent
* is that everything must be trimmed. Override the default
* minimum TRIM size to prevent ranges from being skipped.
*/
if (vd->vdev_trim_secure) {
ta.trim_flags |= ZIO_TRIM_SECURE;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
}
uint64_t ms_count = 0;
for (uint64_t i = 0; !vd->vdev_detached &&
i < vd->vdev_top->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_top->vdev_ms[i];
/*
* If we've expanded the top-level vdev or it's our
* first pass, calculate our progress.
*/
if (vd->vdev_top->vdev_ms_count != ms_count) {
vdev_trim_calculate_progress(vd);
ms_count = vd->vdev_top->vdev_ms_count;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
/*
* If a partial TRIM was requested skip metaslabs which have
* never been initialized and thus have never been written.
*/
if (msp->ms_sm == NULL && vd->vdev_trim_partial) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_calculate_progress(vd);
continue;
}
ta.trim_msp = msp;
range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta);
range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock);
error = vdev_trim_ranges(&ta);
metaslab_enable(msp, B_TRUE, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
range_tree_vacate(ta.trim_tree, NULL, NULL);
if (error != 0)
break;
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[0] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted) {
if (vdev_writeable(vd)) {
vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
} else if (vd->vdev_faulted) {
vdev_trim_change_state(vd, VDEV_TRIM_CANCELED,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
}
ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0);
/*
* Drop the vdev_trim_lock while we sync out the txg since it's
* possible that a device might be trying to come online and must
* check to see if it needs to restart a trim. That thread will be
* holding the spa_config_lock which would prevent the txg_wait_synced
* from completing.
*/
mutex_exit(&vd->vdev_trim_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&vd->vdev_trim_lock);
vd->vdev_trim_thread = NULL;
cv_broadcast(&vd->vdev_trim_cv);
mutex_exit(&vd->vdev_trim_lock);
thread_exit();
}
/*
* Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock,
* the vdev_t must be a leaf and cannot already be manually trimming.
*/
void
vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
vd->vdev_trim_thread = thread_create(NULL, 0,
vdev_trim_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
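/*
 * Illustrative sketch only (not part of the build): how a caller is
 * expected to start a manual TRIM. The locking and checks shown mirror
 * the asserts above; the real ioctl path performs additional error
 * handling, so treat this as a simplified, assumed example rather than
 * the actual caller.
 *
 *	mutex_enter(&vd->vdev_trim_lock);
 *	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd) &&
 *	    vd->vdev_trim_thread == NULL && !vd->vdev_detached &&
 *	    !vd->vdev_top->vdev_removing)
 *		vdev_trim(vd, rate, partial, secure);
 *	mutex_exit(&vd->vdev_trim_lock);
 */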
/*
* Wait for the trimming thread to be terminated (canceled or stopped).
*/
static void
vdev_trim_stop_wait_impl(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
while (vd->vdev_trim_thread != NULL)
cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
vd->vdev_trim_exit_wanted = B_FALSE;
}
/*
* Wait for vdev trim threads which were listed to cleanly exit.
*/
void
vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
{
+ (void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
while ((vd = list_remove_head(vd_list)) != NULL) {
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop_wait_impl(vd);
mutex_exit(&vd->vdev_trim_lock);
}
}
/*
* Stop trimming a device, with the resultant trimming state being tgt_state.
* For blocking behavior pass NULL for vd_list. Otherwise, when a list_t is
* provided, the stopping vdev is inserted into the list. Callers are then
* required to call vdev_trim_stop_wait() to block for all the trim threads
* to exit. The caller must hold vdev_trim_lock and must not be writing to
* the spa config, as the trimming thread may try to enter the config as a
* reader before exiting.
*/
void
vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
{
ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
/*
* Allow cancel requests to proceed even if the trim thread has
* stopped.
*/
if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED)
return;
vdev_trim_change_state(vd, tgt_state, 0, 0, 0);
vd->vdev_trim_exit_wanted = B_TRUE;
if (vd_list == NULL) {
vdev_trim_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock));
list_insert_tail(vd_list, vd);
}
}
/*
* Requests that all listed vdevs stop trimming.
*/
static void
vdev_trim_stop_all_impl(vdev_t *vd, vdev_trim_state_t tgt_state,
list_t *vd_list)
{
if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_stop(vd, tgt_state, vd_list);
mutex_exit(&vd->vdev_trim_lock);
return;
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state,
vd_list);
}
}
/*
* Convenience function to stop trimming of a vdev tree and set all trim
* thread pointers to NULL.
*/
void
vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
{
spa_t *spa = vd->vdev_spa;
list_t vd_list;
vdev_t *vd_l2cache;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
vdev_trim_stop_all_impl(vd, tgt_state, &vd_list);
/*
* Iterate over cache devices and request that trimming of the
* whole device stop, in case we export the pool or remove the cache
* device prematurely.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd_l2cache = spa->spa_l2cache.sav_vdevs[i];
vdev_trim_stop_all_impl(vd_l2cache, tgt_state, &vd_list);
}
vdev_trim_stop_wait(spa, &vd_list);
if (vd->vdev_spa->spa_sync_on) {
/* Make sure that our state has been synced to disk */
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
}
list_destroy(&vd_list);
}
/*
* Conditionally restarts a manual TRIM given its on-disk state.
*/
void
vdev_trim_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
if (vd->vdev_leaf_zap != 0) {
mutex_enter(&vd->vdev_trim_lock);
uint64_t trim_state = VDEV_TRIM_NONE;
int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
sizeof (trim_state), 1, &trim_state);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_state = trim_state;
uint64_t timestamp = 0;
err = zap_lookup(vd->vdev_spa->spa_meta_objset,
vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME,
sizeof (timestamp), 1, &timestamp);
ASSERT(err == 0 || err == ENOENT);
vd->vdev_trim_action_time = timestamp;
if (vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
vd->vdev_offline) {
/* load progress for reporting, but don't resume */
VERIFY0(vdev_trim_load(vd));
} else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
vd->vdev_trim_thread == NULL) {
VERIFY0(vdev_trim_load(vd));
vdev_trim(vd, vd->vdev_trim_rate,
vd->vdev_trim_partial, vd->vdev_trim_secure);
}
mutex_exit(&vd->vdev_trim_lock);
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_trim_restart(vd->vdev_child[i]);
}
}
/*
* Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
* every TRIM range is contained within ms_allocatable.
*/
static void
vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
{
trim_args_t *ta = arg;
metaslab_t *msp = ta->trim_msp;
VERIFY3B(msp->ms_loaded, ==, B_TRUE);
VERIFY3U(msp->ms_disabled, >, 0);
VERIFY(range_tree_contains(msp->ms_allocatable, start, size));
}
/*
* Each automatic TRIM thread is responsible for managing the trimming of a
* top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
*
* N.B. This behavior is different from a manual TRIM where a thread
* is created for each leaf vdev, instead of each top-level vdev.
*/
static void
vdev_autotrim_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
int shift = 0;
mutex_enter(&vd->vdev_autotrim_lock);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(vd->vdev_autotrim_thread, !=, NULL);
mutex_exit(&vd->vdev_autotrim_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
uint64_t extent_bytes_max = zfs_trim_extent_bytes_max;
uint64_t extent_bytes_min = zfs_trim_extent_bytes_min;
while (!vdev_autotrim_should_stop(vd)) {
int txgs_per_trim = MAX(zfs_trim_txg_batch, 1);
boolean_t issued_trim = B_FALSE;
/*
* All of the metaslabs are divided into groups of size
* num_metaslabs / zfs_trim_txg_batch. Each of these groups
* is composed of metaslabs which are spread evenly over the
* device.
*
* For example, when zfs_trim_txg_batch = 32 (default) then
* group 0 will contain metaslabs 0, 32, 64, ...;
* group 1 will contain metaslabs 1, 33, 65, ...;
* group 2 will contain metaslabs 2, 34, 66, ...; and so on.
*
* On each pass through the while() loop one of these groups
* is selected. This is accomplished by using a shift value
* to select the starting metaslab, then striding over the
* metaslabs using the zfs_trim_txg_batch size. This is
* done to accomplish two things.
*
* 1) By dividing the metaslabs into groups and making sure that each
* group takes a minimum of one txg to process, zfs_trim_txg_batch
* controls the minimum number of txgs which must occur before a
* metaslab is revisited.
*
* 2) Selecting non-consecutive metaslabs distributes the
* TRIM commands for a group evenly over the entire device.
* This can be advantageous for certain types of devices.
*/
for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
i += txgs_per_trim) {
metaslab_t *msp = vd->vdev_ms[i];
range_tree_t *trim_tree;
spa_config_exit(spa, SCL_CONFIG, FTAG);
metaslab_disable(msp);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&msp->ms_lock);
/*
* Skip the metaslab when it has never been allocated
* or when there are no recent frees to trim.
*/
if (msp->ms_sm == NULL ||
range_tree_is_empty(msp->ms_trim)) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
/*
* Skip the metaslab when it has already been disabled.
* This may happen when a manual TRIM or initialize
* operation is running concurrently. In the case
* of a manual TRIM, the ms_trim tree will have been
* vacated. Only ranges added after the manual TRIM
* disabled the metaslab will be included in the tree.
* These will be processed when the automatic TRIM
* next revisits this metaslab.
*/
if (msp->ms_disabled > 1) {
mutex_exit(&msp->ms_lock);
metaslab_enable(msp, B_FALSE, B_FALSE);
continue;
}
/*
* Allocate an empty range tree which is swapped in
* for the existing ms_trim tree while it is processed.
*/
trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
0, 0);
range_tree_swap(&msp->ms_trim, &trim_tree);
ASSERT(range_tree_is_empty(msp->ms_trim));
/*
* There are two cases when constructing the per-vdev
* trim trees for a metaslab. If the top-level vdev
* has no children then it is also a leaf and should
* be trimmed. Otherwise our children are the leaves
* and a trim tree should be constructed for each.
*/
trim_args_t *tap;
uint64_t children = vd->vdev_children;
if (children == 0) {
children = 1;
tap = kmem_zalloc(sizeof (trim_args_t) *
children, KM_SLEEP);
tap[0].trim_vdev = vd;
} else {
tap = kmem_zalloc(sizeof (trim_args_t) *
children, KM_SLEEP);
for (uint64_t c = 0; c < children; c++) {
tap[c].trim_vdev = vd->vdev_child[c];
}
}
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
vdev_t *cvd = ta->trim_vdev;
ta->trim_msp = msp;
ta->trim_extent_bytes_max = extent_bytes_max;
ta->trim_extent_bytes_min = extent_bytes_min;
ta->trim_type = TRIM_TYPE_AUTO;
ta->trim_flags = 0;
if (cvd->vdev_detached ||
!vdev_writeable(cvd) ||
!cvd->vdev_has_trim ||
cvd->vdev_trim_thread != NULL) {
continue;
}
/*
* When a device has an attached hot spare, or
* is being replaced, it will not be trimmed.
* This is done to avoid adding additional
* stress to a potentially unhealthy device,
* and to minimize the required rebuild time.
*/
if (!cvd->vdev_ops->vdev_op_leaf)
continue;
ta->trim_tree = range_tree_create(NULL,
RANGE_SEG64, NULL, 0, 0);
range_tree_walk(trim_tree,
vdev_trim_range_add, ta);
}
mutex_exit(&msp->ms_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* Issue the TRIM I/Os for all ranges covered by the
* TRIM trees. These ranges are safe to TRIM because
* no new allocations will be performed until the call
* to metaslab_enable() below.
*/
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
/*
* Always yield to a manual TRIM if one has
* been started for the child vdev.
*/
if (ta->trim_tree == NULL ||
ta->trim_vdev->vdev_trim_thread != NULL) {
continue;
}
/*
* After this point metaslab_enable() must be
* called with the sync flag set. This is done
* here because vdev_trim_ranges() is allowed
* to be interrupted (EINTR) before issuing all
* of the required TRIM I/Os.
*/
issued_trim = B_TRUE;
int error = vdev_trim_ranges(ta);
if (error)
break;
}
/*
* Verify every range which was trimmed is still
* contained within the ms_allocatable tree.
*/
if (zfs_flags & ZFS_DEBUG_TRIM) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
VERIFY3P(tap[0].trim_msp, ==, msp);
range_tree_walk(trim_tree,
vdev_trim_range_verify, &tap[0]);
mutex_exit(&msp->ms_lock);
}
range_tree_vacate(trim_tree, NULL, NULL);
range_tree_destroy(trim_tree);
metaslab_enable(msp, issued_trim, B_FALSE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (uint64_t c = 0; c < children; c++) {
trim_args_t *ta = &tap[c];
if (ta->trim_tree == NULL)
continue;
range_tree_vacate(ta->trim_tree, NULL, NULL);
range_tree_destroy(ta->trim_tree);
}
kmem_free(tap, sizeof (trim_args_t) * children);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* After completing the group of metaslabs wait for the next
* open txg. This is done to make sure that a minimum of
* zfs_trim_txg_batch txgs will occur before these metaslabs
* are trimmed again.
*/
txg_wait_open(spa_get_dsl(spa), 0, issued_trim);
shift++;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_trim_io_lock);
while (cvd->vdev_trim_inflight[1] > 0) {
cv_wait(&cvd->vdev_trim_io_cv,
&cvd->vdev_trim_io_lock);
}
mutex_exit(&cvd->vdev_trim_io_lock);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
/*
* When exiting because the autotrim property was set to off, abandon
* any unprocessed ms_trim ranges to reclaim the memory.
*/
if (spa_get_autotrim(spa) == SPA_AUTOTRIM_OFF) {
for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
metaslab_t *msp = vd->vdev_ms[i];
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_trim, NULL, NULL);
mutex_exit(&msp->ms_lock);
}
}
mutex_enter(&vd->vdev_autotrim_lock);
ASSERT(vd->vdev_autotrim_thread != NULL);
vd->vdev_autotrim_thread = NULL;
cv_broadcast(&vd->vdev_autotrim_cv);
mutex_exit(&vd->vdev_autotrim_lock);
thread_exit();
}
/*
* Starts an autotrim thread, if needed, for each top-level vdev which can be
* trimmed. A top-level vdev which has been evacuated will never be trimmed.
*/
void
vdev_autotrim(spa_t *spa)
{
vdev_t *root_vd = spa->spa_root_vdev;
for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
vdev_t *tvd = root_vd->vdev_child[i];
mutex_enter(&tvd->vdev_autotrim_lock);
if (vdev_writeable(tvd) && !tvd->vdev_removing &&
tvd->vdev_autotrim_thread == NULL) {
ASSERT3P(tvd->vdev_top, ==, tvd);
tvd->vdev_autotrim_thread = thread_create(NULL, 0,
vdev_autotrim_thread, tvd, 0, &p0, TS_RUN,
maxclsyspri);
ASSERT(tvd->vdev_autotrim_thread != NULL);
}
mutex_exit(&tvd->vdev_autotrim_lock);
}
}
/*
* Wait for the vdev_autotrim_thread associated with the passed top-level
* vdev to be terminated (canceled or stopped).
*/
void
vdev_autotrim_stop_wait(vdev_t *tvd)
{
mutex_enter(&tvd->vdev_autotrim_lock);
if (tvd->vdev_autotrim_thread != NULL) {
tvd->vdev_autotrim_exit_wanted = B_TRUE;
while (tvd->vdev_autotrim_thread != NULL) {
cv_wait(&tvd->vdev_autotrim_cv,
&tvd->vdev_autotrim_lock);
}
ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
tvd->vdev_autotrim_exit_wanted = B_FALSE;
}
mutex_exit(&tvd->vdev_autotrim_lock);
}
/*
* Wait for all of the vdev_autotrim threads associated with the pool to
* be terminated (canceled or stopped).
*/
void
vdev_autotrim_stop_all(spa_t *spa)
{
vdev_t *root_vd = spa->spa_root_vdev;
for (uint64_t i = 0; i < root_vd->vdev_children; i++)
vdev_autotrim_stop_wait(root_vd->vdev_child[i]);
}
/*
* Conditionally restart all of the vdev_autotrim threads for the pool.
*/
void
vdev_autotrim_restart(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa->spa_autotrim)
vdev_autotrim(spa);
}
static void
vdev_trim_l2arc_thread(void *arg)
{
vdev_t *vd = arg;
spa_t *spa = vd->vdev_spa;
l2arc_dev_t *dev = l2arc_vdev_get(vd);
trim_args_t ta;
range_seg64_t physical_rs;
ASSERT(vdev_is_concrete(vd));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vd->vdev_trim_last_offset = 0;
vd->vdev_trim_rate = 0;
vd->vdev_trim_partial = 0;
vd->vdev_trim_secure = 0;
bzero(&ta, sizeof (ta));
ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_MANUAL;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
ta.trim_flags = 0;
physical_rs.rs_start = vd->vdev_trim_bytes_done = 0;
physical_rs.rs_end = vd->vdev_trim_bytes_est =
vdev_get_min_asize(vd);
range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start);
mutex_enter(&vd->vdev_trim_lock);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
mutex_exit(&vd->vdev_trim_lock);
(void) vdev_trim_ranges(&ta);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
range_tree_vacate(ta.trim_tree, NULL, NULL);
range_tree_destroy(ta.trim_tree);
mutex_enter(&vd->vdev_trim_lock);
if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
vd->vdev_trim_rate, vd->vdev_trim_partial,
vd->vdev_trim_secure);
}
ASSERT(vd->vdev_trim_thread != NULL ||
vd->vdev_trim_inflight[TRIM_TYPE_MANUAL] == 0);
/*
* Drop the vdev_trim_lock while we sync out the txg since it's
* possible that a device might be trying to come online and
* must check to see if it needs to restart a trim. That thread
* will be holding the spa_config_lock which would prevent the
* txg_wait_synced from completing. Same strategy as in
* vdev_trim_thread().
*/
mutex_exit(&vd->vdev_trim_lock);
txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
mutex_enter(&vd->vdev_trim_lock);
/*
* Update the header of the cache device here, before
* broadcasting vdev_trim_cv which may lead to the removal
* of the device. The same applies to setting l2ad_trim_all to
* false.
*/
spa_config_enter(vd->vdev_spa, SCL_L2ARC, vd,
RW_READER);
bzero(dev->l2ad_dev_hdr, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
spa_config_exit(vd->vdev_spa, SCL_L2ARC, vd);
vd->vdev_trim_thread = NULL;
if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE)
dev->l2ad_trim_all = B_FALSE;
cv_broadcast(&vd->vdev_trim_cv);
mutex_exit(&vd->vdev_trim_lock);
thread_exit();
}
/*
* Punches out TRIM threads for the L2ARC devices in a spa and assigns them
* to the vd->vdev_trim_thread variable. This facilitates trimming the whole
* cache device using TRIM_TYPE_MANUAL upon addition to a pool, at pool
* creation, or when the header of the device is invalid.
*/
void
vdev_trim_l2arc(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off TRIM threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_t *vd = spa->spa_l2cache.sav_vdevs[i];
l2arc_dev_t *dev = l2arc_vdev_get(vd);
if (dev == NULL || !dev->l2ad_trim_all) {
/*
* Don't attempt TRIM if the vdev is UNAVAIL or if the
* cache device was not marked for whole-device TRIM
* (i.e. l2arc_trim_ahead = 0, or the L2ARC device header
* is valid with trim_state = VDEV_TRIM_COMPLETE and
* l2ad_log_entries > 0).
*/
continue;
}
mutex_enter(&vd->vdev_trim_lock);
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(vdev_is_concrete(vd));
ASSERT3P(vd->vdev_trim_thread, ==, NULL);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_trim_exit_wanted);
ASSERT(!vd->vdev_top->vdev_removing);
vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, 0, 0, 0);
vd->vdev_trim_thread = thread_create(NULL, 0,
vdev_trim_l2arc_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&vd->vdev_trim_lock);
}
}
/*
* A wrapper which calls vdev_trim_ranges(). It is intended to be called
* on leaf vdevs.
*/
int
vdev_trim_simple(vdev_t *vd, uint64_t start, uint64_t size)
{
trim_args_t ta;
range_seg64_t physical_rs;
int error;
physical_rs.rs_start = start;
physical_rs.rs_end = start + size;
ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(!vd->vdev_detached);
ASSERT(!vd->vdev_top->vdev_removing);
bzero(&ta, sizeof (ta));
ta.trim_vdev = vd;
ta.trim_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
ta.trim_type = TRIM_TYPE_SIMPLE;
ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
ta.trim_flags = 0;
ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);
if (physical_rs.rs_end > physical_rs.rs_start) {
range_tree_add(ta.trim_tree, physical_rs.rs_start,
physical_rs.rs_end - physical_rs.rs_start);
} else {
ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
}
error = vdev_trim_ranges(&ta);
mutex_enter(&vd->vdev_trim_io_lock);
while (vd->vdev_trim_inflight[TRIM_TYPE_SIMPLE] > 0) {
cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
}
mutex_exit(&vd->vdev_trim_io_lock);
range_tree_vacate(ta.trim_tree, NULL, NULL);
range_tree_destroy(ta.trim_tree);
return (error);
}
EXPORT_SYMBOL(vdev_trim);
EXPORT_SYMBOL(vdev_trim_stop);
EXPORT_SYMBOL(vdev_trim_stop_all);
EXPORT_SYMBOL(vdev_trim_stop_wait);
EXPORT_SYMBOL(vdev_trim_restart);
EXPORT_SYMBOL(vdev_autotrim);
EXPORT_SYMBOL(vdev_autotrim_stop_all);
EXPORT_SYMBOL(vdev_autotrim_stop_wait);
EXPORT_SYMBOL(vdev_autotrim_restart);
EXPORT_SYMBOL(vdev_trim_l2arc);
EXPORT_SYMBOL(vdev_trim_simple);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_max, UINT, ZMOD_RW,
"Max size of TRIM commands, larger will be split");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, extent_bytes_min, UINT, ZMOD_RW,
"Min size of TRIM commands, smaller will be skipped");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, metaslab_skip, UINT, ZMOD_RW,
"Skip metaslabs which have never been initialized");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, txg_batch, UINT, ZMOD_RW,
"Min number of txgs to aggregate frees before issuing TRIM");
ZFS_MODULE_PARAM(zfs_trim, zfs_trim_, queue_limit, UINT, ZMOD_RW,
"Max queued TRIMs outstanding per leaf vdev");
/* END CSTYLED */
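/*
 * These tunables are adjustable at run time; for instance, on Linux they
 * typically surface as /sys/module/zfs/parameters/zfs_trim_extent_bytes_max
 * and friends, while on FreeBSD they appear under the vfs.zfs.trim.*
 * sysctl tree (exact names depend on the platform module glue).
 */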
diff --git a/sys/contrib/openzfs/module/zfs/zap.c b/sys/contrib/openzfs/module/zfs/zap.c
index 6f03beef3bdb..4cd14287a5a2 100644
--- a/sys/contrib/openzfs/module/zfs/zap.c
+++ b/sys/contrib/openzfs/module/zfs/zap.c
@@ -1,1386 +1,1384 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
/*
* This file contains the top half of the zfs directory structure
* implementation. The bottom half is in zap_leaf.c.
*
* The zdir is an extendable hash data structure. There is a table of
* pointers to buckets (zap_t->zd_data->zd_leafs). The buckets are
* each a constant size and hold a variable number of directory entries.
* The buckets (aka "leaf nodes") are implemented in zap_leaf.c.
*
* The pointer table holds a power of 2 number of pointers.
* (1<<zap_t->zd_data->zd_phys->zd_prefix_len). The bucket pointed to
* by the pointer at index i in the table holds entries whose hash value
* has a zd_prefix_len-bit prefix equal to i.
*/
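/*
 * For example, with a prefix length of n bits the bucket index for a
 * 64-bit hash h is simply its top n bits (this is what ZAP_HASH_IDX(h, n)
 * computes):
 *
 *	idx = h >> (64 - n);
 *
 * so n = 10 gives a 1024-entry pointer table, and any two entries whose
 * hashes share their top 10 bits land in the same leaf block.
 */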
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
/*
* If zap_iterate_prefetch is set, we will prefetch the entire ZAP object
* (all leaf blocks) when we start iterating over it.
*
* For zap_cursor_init(), the callers all intend to iterate through all the
* entries. There are a few cases where an error (typically i/o error) could
* cause it to bail out early.
*
* For zap_cursor_init_serialized(), there are callers that do the iteration
* outside of ZFS. Typically they would iterate over everything, but we
* don't have control of that. E.g. zfs_ioc_snapshot_list_next(),
* zcp_snapshots_iter(), and other iterators over things in the MOS - these
* are called by /sbin/zfs and channel programs. The other example is
* zfs_readdir() which iterates over directory entries for the getdents()
* syscall. /sbin/ls iterates to the end (unless it receives a signal), but
* userland doesn't have to.
*
* Given that the ZAP entries aren't returned in a specific order, the only
* legitimate use cases for partial iteration would be:
*
* 1. Pagination: e.g. you only want to display 100 entries at a time, so you
* get the first 100 and then wait for the user to hit "next page"
* (which they may never do).
*
* 2. You want to know if there are more than X entries, without relying on
* the zfs-specific implementation of the directory's st_size (which is
* the number of entries).
*/
int zap_iterate_prefetch = B_TRUE;
int fzap_default_block_shift = 14; /* 16k blocksize */
-extern inline zap_phys_t *zap_f_phys(zap_t *zap);
-
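/*
 * A full iteration over a fatzap object follows the usual cursor pattern
 * (see zap_value_search() below for an in-tree example); roughly:
 *
 *	zap_cursor_t zc;
 *	zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
 *	for (zap_cursor_init(&zc, os, zapobj);
 *	    zap_cursor_retrieve(&zc, za) == 0;
 *	    zap_cursor_advance(&zc))
 *		... consume za->za_name / za->za_first_integer ...
 *	zap_cursor_fini(&zc);
 *	kmem_free(za, sizeof (*za));
 *
 * which is why prefetching all the leaf blocks up front is usually a win.
 */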
static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);
void
fzap_byteswap(void *vbuf, size_t size)
{
uint64_t block_type = *(uint64_t *)vbuf;
if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
zap_leaf_byteswap(vbuf, size);
else {
/* it's a ptrtbl block */
byteswap_uint64_array(vbuf, size);
}
}
void
fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
{
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
zap->zap_ismicro = FALSE;
zap->zap_dbu.dbu_evict_func_sync = zap_evict_sync;
zap->zap_dbu.dbu_evict_func_async = NULL;
mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT, 0);
zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;
zap_phys_t *zp = zap_f_phys(zap);
/*
* explicitly zero it since it might be coming from an
* initialized microzap
*/
bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
zp->zap_block_type = ZBT_HEADER;
zp->zap_magic = ZAP_MAGIC;
zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);
zp->zap_freeblk = 2; /* block 1 will be the first leaf */
zp->zap_num_leafs = 1;
zp->zap_num_entries = 0;
zp->zap_salt = zap->zap_salt;
zp->zap_normflags = zap->zap_normflags;
zp->zap_flags = flags;
/* block 1 will be the first leaf */
for (int i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;
/*
* set up block 1 - the first leaf
*/
dmu_buf_t *db;
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db, tx);
zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
l->l_dbuf = db;
zap_leaf_init(l, zp->zap_normflags != 0);
kmem_free(l, sizeof (zap_leaf_t));
dmu_buf_rele(db, FTAG);
}
static int
zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)
{
if (RW_WRITE_HELD(&zap->zap_rwlock))
return (1);
if (rw_tryupgrade(&zap->zap_rwlock)) {
dmu_buf_will_dirty(zap->zap_dbuf, tx);
return (1);
}
return (0);
}
/*
* Generic routines for dealing with the pointer & cookie tables.
*/
static int
zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n),
dmu_tx_t *tx)
{
uint64_t newblk;
int bs = FZAP_BLOCK_SHIFT(zap);
int hepb = 1<<(bs-4);
/* hepb = half the number of entries in a block */
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
ASSERT(tbl->zt_blk != 0);
ASSERT(tbl->zt_numblks > 0);
if (tbl->zt_nextblk != 0) {
newblk = tbl->zt_nextblk;
} else {
newblk = zap_allocate_blocks(zap, tbl->zt_numblks * 2);
tbl->zt_nextblk = newblk;
ASSERT0(tbl->zt_blks_copied);
dmu_prefetch(zap->zap_objset, zap->zap_object, 0,
tbl->zt_blk << bs, tbl->zt_numblks << bs,
ZIO_PRIORITY_SYNC_READ);
}
/*
* Copy the ptrtbl from the old to new location.
*/
uint64_t b = tbl->zt_blks_copied;
dmu_buf_t *db_old;
int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH);
if (err != 0)
return (err);
/* first half of entries in old[b] go to new[2*b+0] */
dmu_buf_t *db_new;
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
(newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db_new, tx);
transfer_func(db_old->db_data, db_new->db_data, hepb);
dmu_buf_rele(db_new, FTAG);
/* second half of entries in old[b] go to new[2*b+1] */
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
(newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db_new, tx);
transfer_func((uint64_t *)db_old->db_data + hepb,
db_new->db_data, hepb);
dmu_buf_rele(db_new, FTAG);
dmu_buf_rele(db_old, FTAG);
tbl->zt_blks_copied++;
dprintf("copied block %llu of %llu\n",
(u_longlong_t)tbl->zt_blks_copied,
(u_longlong_t)tbl->zt_numblks);
if (tbl->zt_blks_copied == tbl->zt_numblks) {
(void) dmu_free_range(zap->zap_objset, zap->zap_object,
tbl->zt_blk << bs, tbl->zt_numblks << bs, tx);
tbl->zt_blk = newblk;
tbl->zt_numblks *= 2;
tbl->zt_shift++;
tbl->zt_nextblk = 0;
tbl->zt_blks_copied = 0;
dprintf("finished; numblocks now %llu (%uk entries)\n",
(u_longlong_t)tbl->zt_numblks, 1<<(tbl->zt_shift-10));
}
return (0);
}
static int
zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
dmu_tx_t *tx)
{
int bs = FZAP_BLOCK_SHIFT(zap);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(tbl->zt_blk != 0);
dprintf("storing %llx at index %llx\n", (u_longlong_t)val,
(u_longlong_t)idx);
uint64_t blk = idx >> (bs-3);
uint64_t off = idx & ((1<<(bs-3))-1);
dmu_buf_t *db;
int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
if (err != 0)
return (err);
dmu_buf_will_dirty(db, tx);
if (tbl->zt_nextblk != 0) {
uint64_t idx2 = idx * 2;
uint64_t blk2 = idx2 >> (bs-3);
uint64_t off2 = idx2 & ((1<<(bs-3))-1);
dmu_buf_t *db2;
err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(tbl->zt_nextblk + blk2) << bs, FTAG, &db2,
DMU_READ_NO_PREFETCH);
if (err != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
dmu_buf_will_dirty(db2, tx);
((uint64_t *)db2->db_data)[off2] = val;
((uint64_t *)db2->db_data)[off2+1] = val;
dmu_buf_rele(db2, FTAG);
}
((uint64_t *)db->db_data)[off] = val;
dmu_buf_rele(db, FTAG);
return (0);
}
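/*
 * The blk/off arithmetic above (and in zap_table_load() below) treats the
 * table as an array of 8-byte entries packed 1 << (bs - 3) per block.  With
 * the default bs = 14 (16K blocks) a block holds 2048 entries, so e.g.
 * idx = 5000 maps to blk = 5000 >> 11 = 2 and off = 5000 & 2047 = 904.
 */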
static int
zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
{
int bs = FZAP_BLOCK_SHIFT(zap);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
uint64_t blk = idx >> (bs-3);
uint64_t off = idx & ((1<<(bs-3))-1);
/*
* Note: this is equivalent to dmu_buf_hold(), but we use
* _dnode_enter / _by_dnode, which is faster since we don't
* have to hold the dnode.
*/
dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
dmu_buf_t *db;
int err = dmu_buf_hold_by_dnode(dn,
(tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
dmu_buf_dnode_exit(zap->zap_dbuf);
if (err != 0)
return (err);
*valp = ((uint64_t *)db->db_data)[off];
dmu_buf_rele(db, FTAG);
if (tbl->zt_nextblk != 0) {
/*
* read the nextblk for the sake of i/o error checking,
* so that zap_table_load() will catch errors for
* zap_table_store.
*/
blk = (idx*2) >> (bs-3);
dn = dmu_buf_dnode_enter(zap->zap_dbuf);
err = dmu_buf_hold_by_dnode(dn,
(tbl->zt_nextblk + blk) << bs, FTAG, &db,
DMU_READ_NO_PREFETCH);
dmu_buf_dnode_exit(zap->zap_dbuf);
if (err == 0)
dmu_buf_rele(db, FTAG);
}
return (err);
}
/*
* Routines for growing the ptrtbl.
*/
static void
zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
{
for (int i = 0; i < n; i++) {
uint64_t lb = src[i];
dst[2 * i + 0] = lb;
dst[2 * i + 1] = lb;
}
}
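/*
 * For example, transferring the two-entry source { A, B } yields the
 * four-entry destination { A, A, B, B }: every leaf pointer is duplicated,
 * which is exactly what doubling the ptrtbl (adding one prefix bit)
 * requires before any leaf has actually been split.
 */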
static int
zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
{
/*
* The pointer table should never use more hash bits than we
* have (otherwise we'd be using useless zero bits to index it).
* If we are within 2 bits of running out, stop growing, since
* this is already an aberrant condition.
*/
if (zap_f_phys(zap)->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
return (SET_ERROR(ENOSPC));
if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
/*
* We are outgrowing the "embedded" ptrtbl (the one
* stored in the header block). Give it its own entire
* block, which will double the size of the ptrtbl.
*/
ASSERT3U(zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
ASSERT0(zap_f_phys(zap)->zap_ptrtbl.zt_blk);
uint64_t newblk = zap_allocate_blocks(zap, 1);
dmu_buf_t *db_new;
int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
DMU_READ_NO_PREFETCH);
if (err != 0)
return (err);
dmu_buf_will_dirty(db_new, tx);
zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
dmu_buf_rele(db_new, FTAG);
zap_f_phys(zap)->zap_ptrtbl.zt_blk = newblk;
zap_f_phys(zap)->zap_ptrtbl.zt_numblks = 1;
zap_f_phys(zap)->zap_ptrtbl.zt_shift++;
ASSERT3U(1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
zap_f_phys(zap)->zap_ptrtbl.zt_numblks <<
(FZAP_BLOCK_SHIFT(zap)-3));
return (0);
} else {
return (zap_table_grow(zap, &zap_f_phys(zap)->zap_ptrtbl,
zap_ptrtbl_transfer, tx));
}
}
static void
zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
{
dmu_buf_will_dirty(zap->zap_dbuf, tx);
mutex_enter(&zap->zap_f.zap_num_entries_mtx);
ASSERT(delta > 0 || zap_f_phys(zap)->zap_num_entries >= -delta);
zap_f_phys(zap)->zap_num_entries += delta;
mutex_exit(&zap->zap_f.zap_num_entries_mtx);
}
static uint64_t
zap_allocate_blocks(zap_t *zap, int nblocks)
{
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
uint64_t newblk = zap_f_phys(zap)->zap_freeblk;
zap_f_phys(zap)->zap_freeblk += nblocks;
return (newblk);
}
static void
zap_leaf_evict_sync(void *dbu)
{
zap_leaf_t *l = dbu;
rw_destroy(&l->l_rwlock);
kmem_free(l, sizeof (zap_leaf_t));
}
static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
rw_init(&l->l_rwlock, NULL, RW_NOLOCKDEP, NULL);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = zap_allocate_blocks(zap, 1);
l->l_dbuf = NULL;
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
DMU_READ_NO_PREFETCH));
dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
VERIFY3P(NULL, ==, dmu_buf_set_user(l->l_dbuf, &l->l_dbu));
dmu_buf_will_dirty(l->l_dbuf, tx);
zap_leaf_init(l, zap->zap_normflags != 0);
zap_f_phys(zap)->zap_num_leafs++;
return (l);
}
int
fzap_count(zap_t *zap, uint64_t *count)
{
ASSERT(!zap->zap_ismicro);
mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
*count = zap_f_phys(zap)->zap_num_entries;
mutex_exit(&zap->zap_f.zap_num_entries_mtx);
return (0);
}
/*
* Routines for obtaining zap_leaf_t's
*/
void
zap_put_leaf(zap_leaf_t *l)
{
rw_exit(&l->l_rwlock);
dmu_buf_rele(l->l_dbuf, NULL);
}
static zap_leaf_t *
zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
{
ASSERT(blkid != 0);
zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = blkid;
l->l_bs = highbit64(db->db_size) - 1;
l->l_dbuf = db;
dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
zap_leaf_t *winner = dmu_buf_set_user(db, &l->l_dbu);
rw_exit(&l->l_rwlock);
if (winner != NULL) {
/* someone else set it first */
zap_leaf_evict_sync(&l->l_dbu);
l = winner;
}
/*
* lhr_pad was previously used for the next leaf in the leaf
* chain. There should be no chained leafs (as we have removed
* support for them).
*/
ASSERT0(zap_leaf_phys(l)->l_hdr.lh_pad1);
/*
* There should be more hash entries than there can be
* chunks to put in the hash table
*/
ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);
/* The chunks should begin at the end of the hash table */
ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==, (zap_leaf_chunk_t *)
&zap_leaf_phys(l)->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);
/* The chunks should end at the end of the block */
ASSERT3U((uintptr_t)&ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)) -
(uintptr_t)zap_leaf_phys(l), ==, l->l_dbuf->db_size);
return (l);
}
static int
zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
zap_leaf_t **lp)
{
dmu_buf_t *db;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
/*
* If the system crashed just after dmu_free_long_range in zfs_rmnode, we
* would be left with an empty xattr dir in the delete queue. blkid=0
* would be passed in when doing zfs_purgedir. If that's the case we
* should just return immediately. The underlying objects should
* already be freed, so this should be perfectly fine.
*/
if (blkid == 0)
return (SET_ERROR(ENOENT));
int bs = FZAP_BLOCK_SHIFT(zap);
dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
int err = dmu_buf_hold_by_dnode(dn,
blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
dmu_buf_dnode_exit(zap->zap_dbuf);
if (err != 0)
return (err);
ASSERT3U(db->db_object, ==, zap->zap_object);
ASSERT3U(db->db_offset, ==, blkid << bs);
ASSERT3U(db->db_size, ==, 1 << bs);
ASSERT(blkid != 0);
zap_leaf_t *l = dmu_buf_get_user(db);
if (l == NULL)
l = zap_open_leaf(blkid, db);
rw_enter(&l->l_rwlock, lt);
/*
* Must lock before dirtying, otherwise zap_leaf_phys(l) could change,
* causing ASSERT below to fail.
*/
if (lt == RW_WRITER)
dmu_buf_will_dirty(db, tx);
ASSERT3U(l->l_blkid, ==, blkid);
ASSERT3P(l->l_dbuf, ==, db);
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_block_type, ==, ZBT_LEAF);
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
*lp = l;
return (0);
}
static int
zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp)
{
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
ASSERT3U(idx, <,
(1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift));
*valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
return (0);
} else {
return (zap_table_load(zap, &zap_f_phys(zap)->zap_ptrtbl,
idx, valp));
}
}
static int
zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
{
ASSERT(tx != NULL);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
if (zap_f_phys(zap)->zap_ptrtbl.zt_blk == 0) {
ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk;
return (0);
} else {
return (zap_table_store(zap, &zap_f_phys(zap)->zap_ptrtbl,
idx, blk, tx));
}
}
static int
zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
{
uint64_t blk;
ASSERT(zap->zap_dbuf == NULL ||
zap_f_phys(zap) == zap->zap_dbuf->db_data);
/* Reality check for corrupt zap objects (leaf or header). */
if ((zap_f_phys(zap)->zap_block_type != ZBT_LEAF &&
zap_f_phys(zap)->zap_block_type != ZBT_HEADER) ||
zap_f_phys(zap)->zap_magic != ZAP_MAGIC) {
return (SET_ERROR(EIO));
}
uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
int err = zap_idx_to_blk(zap, idx, &blk);
if (err != 0)
return (err);
err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);
ASSERT(err ||
ZAP_HASH_IDX(h, zap_leaf_phys(*lp)->l_hdr.lh_prefix_len) ==
zap_leaf_phys(*lp)->l_hdr.lh_prefix);
return (err);
}
static int
zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
void *tag, dmu_tx_t *tx, zap_leaf_t **lp)
{
zap_t *zap = zn->zn_zap;
uint64_t hash = zn->zn_hash;
int err;
int old_prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
ASSERT3U(old_prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
zap_leaf_phys(l)->l_hdr.lh_prefix);
if (zap_tryupgradedir(zap, tx) == 0 ||
old_prefix_len == zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
/* We failed to upgrade, or need to grow the pointer table */
objset_t *os = zap->zap_objset;
uint64_t object = zap->zap_object;
zap_put_leaf(l);
zap_unlockdir(zap, tag);
err = zap_lockdir(os, object, tx, RW_WRITER,
FALSE, FALSE, tag, &zn->zn_zap);
zap = zn->zn_zap;
if (err != 0)
return (err);
ASSERT(!zap->zap_ismicro);
while (old_prefix_len ==
zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
err = zap_grow_ptrtbl(zap, tx);
if (err != 0)
return (err);
}
err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
if (err != 0)
return (err);
if (zap_leaf_phys(l)->l_hdr.lh_prefix_len != old_prefix_len) {
/* it split while our locks were down */
*lp = l;
return (0);
}
}
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
ASSERT3U(old_prefix_len, <, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
zap_leaf_phys(l)->l_hdr.lh_prefix);
int prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
(old_prefix_len + 1);
uint64_t sibling =
(ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;
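/*
 * Example of the sibling computation: with zt_shift = 5 and
 * old_prefix_len = 3, prefix_diff = 1.  If ZAP_HASH_IDX(hash, 4) is
 * 0b1010, then sibling = (0b1010 | 1) << 1 = 22, and the
 * 1 << prefix_diff = 2 ptrtbl entries 22 and 23 (all indexes whose top
 * four bits are 1011) are the ones redirected to the new leaf below.
 */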
/* check for i/o errors before doing zap_leaf_split */
for (int i = 0; i < (1ULL << prefix_diff); i++) {
uint64_t blk;
err = zap_idx_to_blk(zap, sibling + i, &blk);
if (err != 0)
return (err);
ASSERT3U(blk, ==, l->l_blkid);
}
zap_leaf_t *nl = zap_create_leaf(zap, tx);
zap_leaf_split(l, nl, zap->zap_normflags != 0);
/* set sibling pointers */
for (int i = 0; i < (1ULL << prefix_diff); i++) {
err = zap_set_idx_to_blk(zap, sibling + i, nl->l_blkid, tx);
ASSERT0(err); /* we checked for i/o errors above */
}
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_prefix_len, >, 0);
if (hash & (1ULL << (64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len))) {
/* we want the sibling */
zap_put_leaf(l);
*lp = nl;
} else {
zap_put_leaf(nl);
*lp = l;
}
return (0);
}
static void
zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l,
void *tag, dmu_tx_t *tx)
{
zap_t *zap = zn->zn_zap;
int shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
int leaffull = (zap_leaf_phys(l)->l_hdr.lh_prefix_len == shift &&
zap_leaf_phys(l)->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER);
zap_put_leaf(l);
if (leaffull || zap_f_phys(zap)->zap_ptrtbl.zt_nextblk) {
/*
* We are in the middle of growing the pointer table, or
* this leaf will soon make us grow it.
*/
if (zap_tryupgradedir(zap, tx) == 0) {
objset_t *os = zap->zap_objset;
uint64_t zapobj = zap->zap_object;
zap_unlockdir(zap, tag);
int err = zap_lockdir(os, zapobj, tx,
RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap);
zap = zn->zn_zap;
if (err != 0)
return;
}
/* could have finished growing while our locks were down */
if (zap_f_phys(zap)->zap_ptrtbl.zt_shift == shift)
(void) zap_grow_ptrtbl(zap, tx);
}
}
static int
fzap_checkname(zap_name_t *zn)
{
if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
fzap_checksize(uint64_t integer_size, uint64_t num_integers)
{
/* Only integer sizes supported by C */
switch (integer_size) {
case 1:
case 2:
case 4:
case 8:
break;
default:
return (SET_ERROR(EINVAL));
}
if (integer_size * num_integers > ZAP_MAXVALUELEN)
return (SET_ERROR(E2BIG));
return (0);
}
static int
fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers)
{
int err = fzap_checkname(zn);
if (err != 0)
return (err);
return (fzap_checksize(integer_size, num_integers));
}
/*
* Routines for manipulating attributes.
*/
int
fzap_lookup(zap_name_t *zn,
uint64_t integer_size, uint64_t num_integers, void *buf,
char *realname, int rn_len, boolean_t *ncp)
{
zap_leaf_t *l;
zap_entry_handle_t zeh;
int err = fzap_checkname(zn);
if (err != 0)
return (err);
err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
if (err != 0)
return (err);
err = zap_leaf_lookup(l, zn, &zeh);
if (err == 0) {
if ((err = fzap_checksize(integer_size, num_integers)) != 0) {
zap_put_leaf(l);
return (err);
}
err = zap_entry_read(&zeh, integer_size, num_integers, buf);
(void) zap_entry_read_name(zn->zn_zap, &zeh, rn_len, realname);
if (ncp) {
*ncp = zap_entry_normalization_conflict(&zeh,
zn, NULL, zn->zn_zap);
}
}
zap_put_leaf(l);
return (err);
}
int
fzap_add_cd(zap_name_t *zn,
uint64_t integer_size, uint64_t num_integers,
const void *val, uint32_t cd, void *tag, dmu_tx_t *tx)
{
zap_leaf_t *l;
int err;
zap_entry_handle_t zeh;
zap_t *zap = zn->zn_zap;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(!zap->zap_ismicro);
ASSERT(fzap_check(zn, integer_size, num_integers) == 0);
err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
if (err != 0)
return (err);
retry:
err = zap_leaf_lookup(l, zn, &zeh);
if (err == 0) {
err = SET_ERROR(EEXIST);
goto out;
}
if (err != ENOENT)
goto out;
err = zap_entry_create(l, zn, cd,
integer_size, num_integers, val, &zeh);
if (err == 0) {
zap_increment_num_entries(zap, 1, tx);
} else if (err == EAGAIN) {
err = zap_expand_leaf(zn, l, tag, tx, &l);
zap = zn->zn_zap; /* zap_expand_leaf() may change zap */
if (err == 0) {
goto retry;
} else if (err == ENOSPC) {
/*
* If we failed to expand the leaf, then bail out,
* as there is no point in trying
* zap_put_leaf_maybe_grow_ptrtbl().
*/
return (err);
}
}
out:
if (zap != NULL)
zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
return (err);
}
int
fzap_add(zap_name_t *zn,
uint64_t integer_size, uint64_t num_integers,
const void *val, void *tag, dmu_tx_t *tx)
{
int err = fzap_check(zn, integer_size, num_integers);
if (err != 0)
return (err);
return (fzap_add_cd(zn, integer_size, num_integers,
val, ZAP_NEED_CD, tag, tx));
}
int
fzap_update(zap_name_t *zn,
int integer_size, uint64_t num_integers, const void *val,
void *tag, dmu_tx_t *tx)
{
zap_leaf_t *l;
int err;
boolean_t create;
zap_entry_handle_t zeh;
zap_t *zap = zn->zn_zap;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
err = fzap_check(zn, integer_size, num_integers);
if (err != 0)
return (err);
err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
if (err != 0)
return (err);
retry:
err = zap_leaf_lookup(l, zn, &zeh);
create = (err == ENOENT);
ASSERT(err == 0 || err == ENOENT);
if (create) {
err = zap_entry_create(l, zn, ZAP_NEED_CD,
integer_size, num_integers, val, &zeh);
if (err == 0)
zap_increment_num_entries(zap, 1, tx);
} else {
err = zap_entry_update(&zeh, integer_size, num_integers, val);
}
if (err == EAGAIN) {
err = zap_expand_leaf(zn, l, tag, tx, &l);
zap = zn->zn_zap; /* zap_expand_leaf() may change zap */
if (err == 0)
goto retry;
}
if (zap != NULL)
zap_put_leaf_maybe_grow_ptrtbl(zn, l, tag, tx);
return (err);
}
int
fzap_length(zap_name_t *zn,
uint64_t *integer_size, uint64_t *num_integers)
{
zap_leaf_t *l;
int err;
zap_entry_handle_t zeh;
err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
if (err != 0)
return (err);
err = zap_leaf_lookup(l, zn, &zeh);
if (err != 0)
goto out;
if (integer_size != 0)
*integer_size = zeh.zeh_integer_size;
if (num_integers != 0)
*num_integers = zeh.zeh_num_integers;
out:
zap_put_leaf(l);
return (err);
}
int
fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
{
zap_leaf_t *l;
int err;
zap_entry_handle_t zeh;
err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l);
if (err != 0)
return (err);
err = zap_leaf_lookup(l, zn, &zeh);
if (err == 0) {
zap_entry_remove(&zeh);
zap_increment_num_entries(zn->zn_zap, -1, tx);
}
zap_put_leaf(l);
return (err);
}
void
fzap_prefetch(zap_name_t *zn)
{
uint64_t blk;
zap_t *zap = zn->zn_zap;
uint64_t idx = ZAP_HASH_IDX(zn->zn_hash,
zap_f_phys(zap)->zap_ptrtbl.zt_shift);
if (zap_idx_to_blk(zap, idx, &blk) != 0)
return;
int bs = FZAP_BLOCK_SHIFT(zap);
dmu_prefetch(zap->zap_objset, zap->zap_object, 0, blk << bs, 1 << bs,
ZIO_PRIORITY_SYNC_READ);
}
/*
* Helper functions for consumers.
*/
uint64_t
zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
const char *name, dmu_tx_t *tx)
{
return (zap_create_link_dnsize(os, ot, parent_obj, name, 0, tx));
}
uint64_t
zap_create_link_dnsize(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
const char *name, int dnodesize, dmu_tx_t *tx)
{
uint64_t new_obj;
new_obj = zap_create_dnsize(os, ot, DMU_OT_NONE, 0, dnodesize, tx);
VERIFY(new_obj != 0);
VERIFY0(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
tx));
return (new_obj);
}
int
zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
char *name)
{
zap_cursor_t zc;
int err;
if (mask == 0)
mask = -1ULL;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, zapobj);
(err = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
if ((za->za_first_integer & mask) == (value & mask)) {
(void) strlcpy(name, za->za_name, MAXNAMELEN);
break;
}
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
int
zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
{
zap_cursor_t zc;
int err = 0;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, fromobj);
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za->za_integer_length != 8 || za->za_num_integers != 1) {
err = SET_ERROR(EINVAL);
break;
}
err = zap_add(os, intoobj, za->za_name,
8, 1, &za->za_first_integer, tx);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
int
zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
uint64_t value, dmu_tx_t *tx)
{
zap_cursor_t zc;
int err = 0;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, fromobj);
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za->za_integer_length != 8 || za->za_num_integers != 1) {
err = SET_ERROR(EINVAL);
break;
}
err = zap_add(os, intoobj, za->za_name,
8, 1, &value, tx);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
int
zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
dmu_tx_t *tx)
{
zap_cursor_t zc;
int err = 0;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, fromobj);
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
uint64_t delta = 0;
if (za->za_integer_length != 8 || za->za_num_integers != 1) {
err = SET_ERROR(EINVAL);
break;
}
err = zap_lookup(os, intoobj, za->za_name, 8, 1, &delta);
if (err != 0 && err != ENOENT)
break;
delta += za->za_first_integer;
err = zap_update(os, intoobj, za->za_name, 8, 1, &delta, tx);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
int
zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
return (zap_add(os, obj, name, 8, 1, &value, tx));
}
int
zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
return (zap_remove(os, obj, name, tx));
}
int
zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
return (zap_lookup(os, obj, name, 8, 1, &value));
}
int
zap_add_int_key(objset_t *os, uint64_t obj,
uint64_t key, uint64_t value, dmu_tx_t *tx)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
return (zap_add(os, obj, name, 8, 1, &value, tx));
}
int
zap_update_int_key(objset_t *os, uint64_t obj,
uint64_t key, uint64_t value, dmu_tx_t *tx)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
return (zap_update(os, obj, name, 8, 1, &value, tx));
}
int
zap_lookup_int_key(objset_t *os, uint64_t obj, uint64_t key, uint64_t *valuep)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
return (zap_lookup(os, obj, name, 8, 1, valuep));
}
int
zap_increment(objset_t *os, uint64_t obj, const char *name, int64_t delta,
dmu_tx_t *tx)
{
uint64_t value = 0;
if (delta == 0)
return (0);
int err = zap_lookup(os, obj, name, 8, 1, &value);
if (err != 0 && err != ENOENT)
return (err);
value += delta;
if (value == 0)
err = zap_remove(os, obj, name, tx);
else
err = zap_update(os, obj, name, 8, 1, &value, tx);
return (err);
}
int
zap_increment_int(objset_t *os, uint64_t obj, uint64_t key, int64_t delta,
dmu_tx_t *tx)
{
char name[20];
(void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
return (zap_increment(os, obj, name, delta, tx));
}
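/*
 * All of the *_int / *_int_key helpers above share one convention: the
 * 64-bit key is formatted as a lowercase hex string and used as the entry
 * name.  For example, zap_add_int_key(os, obj, 26, 7, tx) stores the value
 * 7 under the name "1a", and zap_lookup_int_key() with key 26 reads it back.
 */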
/*
* Routines for iterating over the attributes.
*/
int
fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
{
int err = ENOENT;
zap_entry_handle_t zeh;
zap_leaf_t *l;
/* retrieve the next entry at or after zc_hash/zc_cd */
/* if no entry, return ENOENT */
/*
* If we are reading from the beginning, we're almost certain to
* iterate over the entire ZAP object. If there are multiple leaf
* blocks (freeblk > 2), prefetch the whole object (up to
* dmu_prefetch_max bytes), so that we read the leaf blocks
* concurrently. (Unless noprefetch was requested via
* zap_cursor_init_noprefetch()).
*/
if (zc->zc_hash == 0 && zap_iterate_prefetch &&
zc->zc_prefetch && zap_f_phys(zap)->zap_freeblk > 2) {
dmu_prefetch(zc->zc_objset, zc->zc_zapobj, 0, 0,
zap_f_phys(zap)->zap_freeblk << FZAP_BLOCK_SHIFT(zap),
ZIO_PRIORITY_ASYNC_READ);
}
if (zc->zc_leaf &&
(ZAP_HASH_IDX(zc->zc_hash,
zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix_len) !=
zap_leaf_phys(zc->zc_leaf)->l_hdr.lh_prefix)) {
rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
zap_put_leaf(zc->zc_leaf);
zc->zc_leaf = NULL;
}
again:
if (zc->zc_leaf == NULL) {
err = zap_deref_leaf(zap, zc->zc_hash, NULL, RW_READER,
&zc->zc_leaf);
if (err != 0)
return (err);
} else {
rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
}
l = zc->zc_leaf;
err = zap_leaf_lookup_closest(l, zc->zc_hash, zc->zc_cd, &zeh);
if (err == ENOENT) {
if (zap_leaf_phys(l)->l_hdr.lh_prefix_len == 0) {
zc->zc_hash = -1ULL;
zc->zc_cd = 0;
} else {
uint64_t nocare = (1ULL <<
(64 - zap_leaf_phys(l)->l_hdr.lh_prefix_len)) - 1;
zc->zc_hash = (zc->zc_hash & ~nocare) + nocare + 1;
zc->zc_cd = 0;
if (zc->zc_hash == 0) {
zc->zc_hash = -1ULL;
} else {
zap_put_leaf(zc->zc_leaf);
zc->zc_leaf = NULL;
goto again;
}
}
}
if (err == 0) {
zc->zc_hash = zeh.zeh_hash;
zc->zc_cd = zeh.zeh_cd;
za->za_integer_length = zeh.zeh_integer_size;
za->za_num_integers = zeh.zeh_num_integers;
if (zeh.zeh_num_integers == 0) {
za->za_first_integer = 0;
} else {
err = zap_entry_read(&zeh, 8, 1, &za->za_first_integer);
ASSERT(err == 0 || err == EOVERFLOW);
}
err = zap_entry_read_name(zap, &zeh,
sizeof (za->za_name), za->za_name);
ASSERT(err == 0);
za->za_normalization_conflict =
zap_entry_normalization_conflict(&zeh,
NULL, za->za_name, zap);
}
rw_exit(&zc->zc_leaf->l_rwlock);
return (err);
}
static void
zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
{
uint64_t lastblk = 0;
/*
* NB: if a leaf has more pointers than an entire ptrtbl block
* can hold, then it'll be accounted for more than once, since
* we won't have lastblk.
*/
for (int i = 0; i < len; i++) {
zap_leaf_t *l;
if (tbl[i] == lastblk)
continue;
lastblk = tbl[i];
int err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
if (err == 0) {
zap_leaf_stats(zap, l, zs);
zap_put_leaf(l);
}
}
}
void
fzap_get_stats(zap_t *zap, zap_stats_t *zs)
{
int bs = FZAP_BLOCK_SHIFT(zap);
zs->zs_blocksize = 1ULL << bs;
/*
* Set zap_phys_t fields
*/
zs->zs_num_leafs = zap_f_phys(zap)->zap_num_leafs;
zs->zs_num_entries = zap_f_phys(zap)->zap_num_entries;
zs->zs_num_blocks = zap_f_phys(zap)->zap_freeblk;
zs->zs_block_type = zap_f_phys(zap)->zap_block_type;
zs->zs_magic = zap_f_phys(zap)->zap_magic;
zs->zs_salt = zap_f_phys(zap)->zap_salt;
/*
* Set zap_ptrtbl fields
*/
zs->zs_ptrtbl_len = 1ULL << zap_f_phys(zap)->zap_ptrtbl.zt_shift;
zs->zs_ptrtbl_nextblk = zap_f_phys(zap)->zap_ptrtbl.zt_nextblk;
zs->zs_ptrtbl_blks_copied =
zap_f_phys(zap)->zap_ptrtbl.zt_blks_copied;
zs->zs_ptrtbl_zt_blk = zap_f_phys(zap)->zap_ptrtbl.zt_blk;
zs->zs_ptrtbl_zt_numblks = zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
zs->zs_ptrtbl_zt_shift = zap_f_phys(zap)->zap_ptrtbl.zt_shift;
if (zap_f_phys(zap)->zap_ptrtbl.zt_numblks == 0) {
/* the ptrtbl is entirely in the header block. */
zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
} else {
dmu_prefetch(zap->zap_objset, zap->zap_object, 0,
zap_f_phys(zap)->zap_ptrtbl.zt_blk << bs,
zap_f_phys(zap)->zap_ptrtbl.zt_numblks << bs,
ZIO_PRIORITY_SYNC_READ);
for (int b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
b++) {
dmu_buf_t *db;
int err;
err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(zap_f_phys(zap)->zap_ptrtbl.zt_blk + b) << bs,
FTAG, &db, DMU_READ_NO_PREFETCH);
if (err == 0) {
zap_stats_ptrtbl(zap, db->db_data,
1<<(bs-3), zs);
dmu_buf_rele(db, FTAG);
}
}
}
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_iterate_prefetch, INT, ZMOD_RW,
"When iterating ZAP object, prefetch it");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zap_leaf.c b/sys/contrib/openzfs/module/zfs/zap_leaf.c
index aa6c298c3b4b..aad923d512df 100644
--- a/sys/contrib/openzfs/module/zfs/zap_leaf.c
+++ b/sys/contrib/openzfs/module/zfs/zap_leaf.c
@@ -1,849 +1,847 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/*
* The 512-byte leaf is broken into 32 16-byte chunks.
* Chunk number n means l_chunk[n], even though the header precedes it.
* The names are stored null-terminated.
*/
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
#include <sys/arc.h>
static uint16_t *zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry);
#define CHAIN_END 0xffff /* end of the chunk chain */
#define LEAF_HASH(l, h) \
((ZAP_LEAF_HASH_NUMENTRIES(l)-1) & \
((h) >> \
(64 - ZAP_LEAF_HASH_SHIFT(l) - zap_leaf_phys(l)->l_hdr.lh_prefix_len)))
#define LEAF_HASH_ENTPTR(l, h) (&zap_leaf_phys(l)->l_hash[LEAF_HASH(l, h)])
-extern inline zap_leaf_phys_t *zap_leaf_phys(zap_leaf_t *l);
-
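/*
 * LEAF_HASH() indexes the per-leaf hash table using the
 * ZAP_LEAF_HASH_SHIFT(l) hash bits immediately below the lh_prefix_len
 * bits that were already consumed to select this leaf; the
 * (ZAP_LEAF_HASH_NUMENTRIES(l) - 1) mask keeps the result in range.
 */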
static void
zap_memset(void *a, int c, size_t n)
{
char *cp = a;
char *cpend = cp + n;
while (cp < cpend)
*cp++ = c;
}
static void
stv(int len, void *addr, uint64_t value)
{
switch (len) {
case 1:
*(uint8_t *)addr = value;
return;
case 2:
*(uint16_t *)addr = value;
return;
case 4:
*(uint32_t *)addr = value;
return;
case 8:
*(uint64_t *)addr = value;
return;
default:
cmn_err(CE_PANIC, "bad int len %d", len);
}
}
static uint64_t
ldv(int len, const void *addr)
{
switch (len) {
case 1:
return (*(uint8_t *)addr);
case 2:
return (*(uint16_t *)addr);
case 4:
return (*(uint32_t *)addr);
case 8:
return (*(uint64_t *)addr);
default:
cmn_err(CE_PANIC, "bad int len %d", len);
}
return (0xFEEDFACEDEADBEEFULL);
}
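/*
 * stv()/ldv() simply store and load an integer of the given width; e.g.
 * ldv(2, addr) returns the uint16_t at addr zero-extended to 64 bits, and
 * stv(4, addr, v) writes the low 32 bits of v.
 */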
void
zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
{
zap_leaf_t l;
dmu_buf_t l_dbuf;
l_dbuf.db_data = buf;
l.l_bs = highbit64(size) - 1;
l.l_dbuf = &l_dbuf;
buf->l_hdr.lh_block_type = BSWAP_64(buf->l_hdr.lh_block_type);
buf->l_hdr.lh_prefix = BSWAP_64(buf->l_hdr.lh_prefix);
buf->l_hdr.lh_magic = BSWAP_32(buf->l_hdr.lh_magic);
buf->l_hdr.lh_nfree = BSWAP_16(buf->l_hdr.lh_nfree);
buf->l_hdr.lh_nentries = BSWAP_16(buf->l_hdr.lh_nentries);
buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len);
buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist);
for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
buf->l_hash[i] = BSWAP_16(buf->l_hash[i]);
for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i);
struct zap_leaf_entry *le;
switch (lc->l_free.lf_type) {
case ZAP_CHUNK_ENTRY:
le = &lc->l_entry;
le->le_type = BSWAP_8(le->le_type);
le->le_value_intlen = BSWAP_8(le->le_value_intlen);
le->le_next = BSWAP_16(le->le_next);
le->le_name_chunk = BSWAP_16(le->le_name_chunk);
le->le_name_numints = BSWAP_16(le->le_name_numints);
le->le_value_chunk = BSWAP_16(le->le_value_chunk);
le->le_value_numints = BSWAP_16(le->le_value_numints);
le->le_cd = BSWAP_32(le->le_cd);
le->le_hash = BSWAP_64(le->le_hash);
break;
case ZAP_CHUNK_FREE:
lc->l_free.lf_type = BSWAP_8(lc->l_free.lf_type);
lc->l_free.lf_next = BSWAP_16(lc->l_free.lf_next);
break;
case ZAP_CHUNK_ARRAY:
lc->l_array.la_type = BSWAP_8(lc->l_array.la_type);
lc->l_array.la_next = BSWAP_16(lc->l_array.la_next);
/* la_array doesn't need swapping */
break;
default:
cmn_err(CE_PANIC, "bad leaf type %d",
lc->l_free.lf_type);
}
}
}
void
zap_leaf_init(zap_leaf_t *l, boolean_t sort)
{
l->l_bs = highbit64(l->l_dbuf->db_size) - 1;
zap_memset(&zap_leaf_phys(l)->l_hdr, 0,
sizeof (struct zap_leaf_header));
zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
2*ZAP_LEAF_HASH_NUMENTRIES(l));
for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE;
ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1;
}
ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)-1).l_free.lf_next = CHAIN_END;
zap_leaf_phys(l)->l_hdr.lh_block_type = ZBT_LEAF;
zap_leaf_phys(l)->l_hdr.lh_magic = ZAP_LEAF_MAGIC;
zap_leaf_phys(l)->l_hdr.lh_nfree = ZAP_LEAF_NUMCHUNKS(l);
if (sort)
zap_leaf_phys(l)->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED;
}
/*
* Routines which manipulate leaf chunks (l_chunk[]).
*/
static uint16_t
zap_leaf_chunk_alloc(zap_leaf_t *l)
{
ASSERT(zap_leaf_phys(l)->l_hdr.lh_nfree > 0);
int chunk = zap_leaf_phys(l)->l_hdr.lh_freelist;
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE);
zap_leaf_phys(l)->l_hdr.lh_freelist =
ZAP_LEAF_CHUNK(l, chunk).l_free.lf_next;
zap_leaf_phys(l)->l_hdr.lh_nfree--;
return (chunk);
}
static void
zap_leaf_chunk_free(zap_leaf_t *l, uint16_t chunk)
{
struct zap_leaf_free *zlf = &ZAP_LEAF_CHUNK(l, chunk).l_free;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nfree, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT(zlf->lf_type != ZAP_CHUNK_FREE);
zlf->lf_type = ZAP_CHUNK_FREE;
zlf->lf_next = zap_leaf_phys(l)->l_hdr.lh_freelist;
bzero(zlf->lf_pad, sizeof (zlf->lf_pad)); /* help it to compress */
zap_leaf_phys(l)->l_hdr.lh_freelist = chunk;
zap_leaf_phys(l)->l_hdr.lh_nfree++;
}
/*
* Routines which manipulate leaf arrays (zap_leaf_array type chunks).
*/
static uint16_t
zap_leaf_array_create(zap_leaf_t *l, const char *buf,
int integer_size, int num_integers)
{
uint16_t chunk_head;
uint16_t *chunkp = &chunk_head;
int byten = 0;
uint64_t value = 0;
int shift = (integer_size - 1) * 8;
int len = num_integers;
ASSERT3U(num_integers * integer_size, <=, ZAP_MAXVALUELEN);
while (len > 0) {
uint16_t chunk = zap_leaf_chunk_alloc(l);
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
la->la_type = ZAP_CHUNK_ARRAY;
for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) {
if (byten == 0)
value = ldv(integer_size, buf);
la->la_array[i] = value >> shift;
value <<= 8;
if (++byten == integer_size) {
byten = 0;
buf += integer_size;
if (--len == 0)
break;
}
}
*chunkp = chunk;
chunkp = &la->la_next;
}
*chunkp = CHAIN_END;
return (chunk_head);
}
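/*
 * The loop above packs integers big-endian, one byte per la_array slot,
 * chaining additional array chunks as needed.  For example, a single
 * 8-byte value 0x0102030405060708 occupies the first eight bytes of one
 * chunk as 01 02 03 04 05 06 07 08, which is what the 8-byte fast path in
 * zap_leaf_array_read() below reassembles with its shift-and-or expression.
 */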
static void
zap_leaf_array_free(zap_leaf_t *l, uint16_t *chunkp)
{
uint16_t chunk = *chunkp;
*chunkp = CHAIN_END;
while (chunk != CHAIN_END) {
int nextchunk = ZAP_LEAF_CHUNK(l, chunk).l_array.la_next;
ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_array.la_type, ==,
ZAP_CHUNK_ARRAY);
zap_leaf_chunk_free(l, chunk);
chunk = nextchunk;
}
}
/* array_len and buf_len are in integers, not bytes */
static void
zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk,
int array_int_len, int array_len, int buf_int_len, uint64_t buf_len,
void *buf)
{
int len = MIN(array_len, buf_len);
int byten = 0;
uint64_t value = 0;
char *p = buf;
ASSERT3U(array_int_len, <=, buf_int_len);
/* Fast path for one 8-byte integer */
if (array_int_len == 8 && buf_int_len == 8 && len == 1) {
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
uint8_t *ip = la->la_array;
uint64_t *buf64 = buf;
*buf64 = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
(uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
(uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
(uint64_t)ip[6] << 8 | (uint64_t)ip[7];
return;
}
/* Fast path for an array of 1-byte integers (eg. the entry name) */
if (array_int_len == 1 && buf_int_len == 1 &&
buf_len > array_len + ZAP_LEAF_ARRAY_BYTES) {
while (chunk != CHAIN_END) {
struct zap_leaf_array *la =
&ZAP_LEAF_CHUNK(l, chunk).l_array;
bcopy(la->la_array, p, ZAP_LEAF_ARRAY_BYTES);
p += ZAP_LEAF_ARRAY_BYTES;
chunk = la->la_next;
}
return;
}
while (len > 0) {
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
value = (value << 8) | la->la_array[i];
byten++;
if (byten == array_int_len) {
stv(buf_int_len, p, value);
byten = 0;
len--;
if (len == 0)
return;
p += buf_int_len;
}
}
chunk = la->la_next;
}
}
static boolean_t
zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
int chunk, int array_numints)
{
int bseen = 0;
if (zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY) {
uint64_t *thiskey =
kmem_alloc(array_numints * sizeof (*thiskey), KM_SLEEP);
ASSERT(zn->zn_key_intlen == sizeof (*thiskey));
zap_leaf_array_read(l, chunk, sizeof (*thiskey), array_numints,
sizeof (*thiskey), array_numints, thiskey);
boolean_t match = bcmp(thiskey, zn->zn_key_orig,
array_numints * sizeof (*thiskey)) == 0;
kmem_free(thiskey, array_numints * sizeof (*thiskey));
return (match);
}
ASSERT(zn->zn_key_intlen == 1);
if (zn->zn_matchtype & MT_NORMALIZE) {
char *thisname = kmem_alloc(array_numints, KM_SLEEP);
zap_leaf_array_read(l, chunk, sizeof (char), array_numints,
sizeof (char), array_numints, thisname);
boolean_t match = zap_match(zn, thisname);
kmem_free(thisname, array_numints);
return (match);
}
/*
* Fast path for exact matching.
* First check that the lengths match, so that we don't read
* past the end of the zn_key_orig array.
*/
if (array_numints != zn->zn_key_orig_numints)
return (B_FALSE);
while (bseen < array_numints) {
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
int toread = MIN(array_numints - bseen, ZAP_LEAF_ARRAY_BYTES);
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
if (bcmp(la->la_array, (char *)zn->zn_key_orig + bseen, toread))
break;
chunk = la->la_next;
bseen += toread;
}
return (bseen == array_numints);
}
/*
* Routines which manipulate leaf entries.
*/
int
zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh)
{
struct zap_leaf_entry *le;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
for (uint16_t *chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash);
*chunkp != CHAIN_END; chunkp = &le->le_next) {
uint16_t chunk = *chunkp;
le = ZAP_LEAF_ENTRY(l, chunk);
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
if (le->le_hash != zn->zn_hash)
continue;
/*
* NB: the entry chain is always sorted by cd on
* normalized zap objects, so this will find the
* lowest-cd match for MT_NORMALIZE.
*/
ASSERT((zn->zn_matchtype == 0) ||
(zap_leaf_phys(l)->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED));
if (zap_leaf_array_match(l, zn, le->le_name_chunk,
le->le_name_numints)) {
zeh->zeh_num_integers = le->le_value_numints;
zeh->zeh_integer_size = le->le_value_intlen;
zeh->zeh_cd = le->le_cd;
zeh->zeh_hash = le->le_hash;
zeh->zeh_chunkp = chunkp;
zeh->zeh_leaf = l;
return (0);
}
}
return (SET_ERROR(ENOENT));
}
/* Return (h1,cd1 >= h2,cd2) */
#define HCD_GTEQ(h1, cd1, h2, cd2) \
((h1 > h2) ? TRUE : ((h1 == h2 && cd1 >= cd2) ? TRUE : FALSE))
int
zap_leaf_lookup_closest(zap_leaf_t *l,
uint64_t h, uint32_t cd, zap_entry_handle_t *zeh)
{
uint64_t besth = -1ULL;
uint32_t bestcd = -1U;
uint16_t bestlh = ZAP_LEAF_HASH_NUMENTRIES(l)-1;
struct zap_leaf_entry *le;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
for (uint16_t lh = LEAF_HASH(l, h); lh <= bestlh; lh++) {
for (uint16_t chunk = zap_leaf_phys(l)->l_hash[lh];
chunk != CHAIN_END; chunk = le->le_next) {
le = ZAP_LEAF_ENTRY(l, chunk);
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
if (HCD_GTEQ(le->le_hash, le->le_cd, h, cd) &&
HCD_GTEQ(besth, bestcd, le->le_hash, le->le_cd)) {
ASSERT3U(bestlh, >=, lh);
bestlh = lh;
besth = le->le_hash;
bestcd = le->le_cd;
zeh->zeh_num_integers = le->le_value_numints;
zeh->zeh_integer_size = le->le_value_intlen;
zeh->zeh_cd = le->le_cd;
zeh->zeh_hash = le->le_hash;
zeh->zeh_fakechunk = chunk;
zeh->zeh_chunkp = &zeh->zeh_fakechunk;
zeh->zeh_leaf = l;
}
}
}
return (bestcd == -1U ? SET_ERROR(ENOENT) : 0);
}
int
zap_entry_read(const zap_entry_handle_t *zeh,
uint8_t integer_size, uint64_t num_integers, void *buf)
{
struct zap_leaf_entry *le =
ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp);
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
if (le->le_value_intlen > integer_size)
return (SET_ERROR(EINVAL));
zap_leaf_array_read(zeh->zeh_leaf, le->le_value_chunk,
le->le_value_intlen, le->le_value_numints,
integer_size, num_integers, buf);
if (zeh->zeh_num_integers > num_integers)
return (SET_ERROR(EOVERFLOW));
return (0);
}
int
zap_entry_read_name(zap_t *zap, const zap_entry_handle_t *zeh, uint16_t buflen,
char *buf)
{
struct zap_leaf_entry *le =
ZAP_LEAF_ENTRY(zeh->zeh_leaf, *zeh->zeh_chunkp);
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
zap_leaf_array_read(zeh->zeh_leaf, le->le_name_chunk, 8,
le->le_name_numints, 8, buflen / 8, buf);
} else {
zap_leaf_array_read(zeh->zeh_leaf, le->le_name_chunk, 1,
le->le_name_numints, 1, buflen, buf);
}
if (le->le_name_numints > buflen)
return (SET_ERROR(EOVERFLOW));
return (0);
}
int
zap_entry_update(zap_entry_handle_t *zeh,
uint8_t integer_size, uint64_t num_integers, const void *buf)
{
zap_leaf_t *l = zeh->zeh_leaf;
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, *zeh->zeh_chunkp);
int delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) -
ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints * le->le_value_intlen);
if ((int)zap_leaf_phys(l)->l_hdr.lh_nfree < delta_chunks)
return (SET_ERROR(EAGAIN));
zap_leaf_array_free(l, &le->le_value_chunk);
le->le_value_chunk =
zap_leaf_array_create(l, buf, integer_size, num_integers);
le->le_value_numints = num_integers;
le->le_value_intlen = integer_size;
return (0);
}
void
zap_entry_remove(zap_entry_handle_t *zeh)
{
zap_leaf_t *l = zeh->zeh_leaf;
ASSERT3P(zeh->zeh_chunkp, !=, &zeh->zeh_fakechunk);
uint16_t entry_chunk = *zeh->zeh_chunkp;
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry_chunk);
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
zap_leaf_array_free(l, &le->le_name_chunk);
zap_leaf_array_free(l, &le->le_value_chunk);
*zeh->zeh_chunkp = le->le_next;
zap_leaf_chunk_free(l, entry_chunk);
zap_leaf_phys(l)->l_hdr.lh_nentries--;
}
int
zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
uint8_t integer_size, uint64_t num_integers, const void *buf,
zap_entry_handle_t *zeh)
{
uint16_t chunk;
struct zap_leaf_entry *le;
uint64_t h = zn->zn_hash;
uint64_t valuelen = integer_size * num_integers;
int numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints *
zn->zn_key_intlen) + ZAP_LEAF_ARRAY_NCHUNKS(valuelen);
if (numchunks > ZAP_LEAF_NUMCHUNKS(l))
return (SET_ERROR(E2BIG));
if (cd == ZAP_NEED_CD) {
/* find the lowest unused cd */
if (zap_leaf_phys(l)->l_hdr.lh_flags & ZLF_ENTRIES_CDSORTED) {
cd = 0;
for (chunk = *LEAF_HASH_ENTPTR(l, h);
chunk != CHAIN_END; chunk = le->le_next) {
le = ZAP_LEAF_ENTRY(l, chunk);
if (le->le_cd > cd)
break;
if (le->le_hash == h) {
ASSERT3U(cd, ==, le->le_cd);
cd++;
}
}
} else {
/* old unsorted format; do it the O(n^2) way */
for (cd = 0; ; cd++) {
for (chunk = *LEAF_HASH_ENTPTR(l, h);
chunk != CHAIN_END; chunk = le->le_next) {
le = ZAP_LEAF_ENTRY(l, chunk);
if (le->le_hash == h &&
le->le_cd == cd) {
break;
}
}
/* If this cd is not in use, we are good. */
if (chunk == CHAIN_END)
break;
}
}
/*
* We would run out of space in a block before we could
* store enough entries to run out of CD values.
*/
ASSERT3U(cd, <, zap_maxcd(zn->zn_zap));
}
if (zap_leaf_phys(l)->l_hdr.lh_nfree < numchunks)
return (SET_ERROR(EAGAIN));
/* make the entry */
chunk = zap_leaf_chunk_alloc(l);
le = ZAP_LEAF_ENTRY(l, chunk);
le->le_type = ZAP_CHUNK_ENTRY;
le->le_name_chunk = zap_leaf_array_create(l, zn->zn_key_orig,
zn->zn_key_intlen, zn->zn_key_orig_numints);
le->le_name_numints = zn->zn_key_orig_numints;
le->le_value_chunk =
zap_leaf_array_create(l, buf, integer_size, num_integers);
le->le_value_numints = num_integers;
le->le_value_intlen = integer_size;
le->le_hash = h;
le->le_cd = cd;
/* link it into the hash chain */
/* XXX if we did the search above, we could just use that */
uint16_t *chunkp = zap_leaf_rehash_entry(l, chunk);
zap_leaf_phys(l)->l_hdr.lh_nentries++;
zeh->zeh_leaf = l;
zeh->zeh_num_integers = num_integers;
zeh->zeh_integer_size = le->le_value_intlen;
zeh->zeh_cd = le->le_cd;
zeh->zeh_hash = le->le_hash;
zeh->zeh_chunkp = chunkp;
return (0);
}
/*
* Determine if there is another entry with the same normalized form.
* For performance purposes, either zn or name must be provided (the
* other can be NULL). Note, there usually won't be any hash
* conflicts, in which case we don't need the concatenated/normalized
* form of the name. But all callers have one of these on hand anyway,
* so might as well take advantage. A cleaner but slower interface
* would accept neither argument, and compute the normalized name as
* needed (using zap_name_alloc(zap_entry_read_name(zeh))).
*/
boolean_t
zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn,
const char *name, zap_t *zap)
{
struct zap_leaf_entry *le;
boolean_t allocdzn = B_FALSE;
if (zap->zap_normflags == 0)
return (B_FALSE);
for (uint16_t chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash);
chunk != CHAIN_END; chunk = le->le_next) {
le = ZAP_LEAF_ENTRY(zeh->zeh_leaf, chunk);
if (le->le_hash != zeh->zeh_hash)
continue;
if (le->le_cd == zeh->zeh_cd)
continue;
if (zn == NULL) {
zn = zap_name_alloc(zap, name, MT_NORMALIZE);
allocdzn = B_TRUE;
}
if (zap_leaf_array_match(zeh->zeh_leaf, zn,
le->le_name_chunk, le->le_name_numints)) {
if (allocdzn)
zap_name_free(zn);
return (B_TRUE);
}
}
if (allocdzn)
zap_name_free(zn);
return (B_FALSE);
}
/*
* Routines for transferring entries between leafs.
*/
static uint16_t *
zap_leaf_rehash_entry(zap_leaf_t *l, uint16_t entry)
{
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
struct zap_leaf_entry *le2;
uint16_t *chunkp;
/*
* keep the entry chain sorted by cd
* NB: this will not cause problems for unsorted leafs, though
* it is unnecessary there.
*/
for (chunkp = LEAF_HASH_ENTPTR(l, le->le_hash);
*chunkp != CHAIN_END; chunkp = &le2->le_next) {
le2 = ZAP_LEAF_ENTRY(l, *chunkp);
if (le2->le_cd > le->le_cd)
break;
}
le->le_next = *chunkp;
*chunkp = entry;
return (chunkp);
}
static uint16_t
zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl)
{
uint16_t new_chunk;
uint16_t *nchunkp = &new_chunk;
while (chunk != CHAIN_END) {
uint16_t nchunk = zap_leaf_chunk_alloc(nl);
struct zap_leaf_array *nla =
&ZAP_LEAF_CHUNK(nl, nchunk).l_array;
struct zap_leaf_array *la =
&ZAP_LEAF_CHUNK(l, chunk).l_array;
int nextchunk = la->la_next;
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT3U(nchunk, <, ZAP_LEAF_NUMCHUNKS(l));
*nla = *la; /* structure assignment */
zap_leaf_chunk_free(l, chunk);
chunk = nextchunk;
*nchunkp = nchunk;
nchunkp = &nla->la_next;
}
*nchunkp = CHAIN_END;
return (new_chunk);
}
static void
zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
{
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
uint16_t chunk = zap_leaf_chunk_alloc(nl);
struct zap_leaf_entry *nle = ZAP_LEAF_ENTRY(nl, chunk);
*nle = *le; /* structure assignment */
(void) zap_leaf_rehash_entry(nl, chunk);
nle->le_name_chunk = zap_leaf_transfer_array(l, le->le_name_chunk, nl);
nle->le_value_chunk =
zap_leaf_transfer_array(l, le->le_value_chunk, nl);
zap_leaf_chunk_free(l, entry);
zap_leaf_phys(l)->l_hdr.lh_nentries--;
zap_leaf_phys(nl)->l_hdr.lh_nentries++;
}
/*
* Transfer the entries whose hash prefix ends in 1 to the new leaf.
*/
void
zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
{
int bit = 64 - 1 - zap_leaf_phys(l)->l_hdr.lh_prefix_len;
/* set new prefix and prefix_len */
zap_leaf_phys(l)->l_hdr.lh_prefix <<= 1;
zap_leaf_phys(l)->l_hdr.lh_prefix_len++;
zap_leaf_phys(nl)->l_hdr.lh_prefix =
zap_leaf_phys(l)->l_hdr.lh_prefix | 1;
zap_leaf_phys(nl)->l_hdr.lh_prefix_len =
zap_leaf_phys(l)->l_hdr.lh_prefix_len;
/* break existing hash chains */
zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
2*ZAP_LEAF_HASH_NUMENTRIES(l));
if (sort)
zap_leaf_phys(l)->l_hdr.lh_flags |= ZLF_ENTRIES_CDSORTED;
/*
* Transfer entries whose hash bit 'bit' is set to nl; rehash
* the remaining entries
*
* NB: We could find entries via the hashtable instead. That
* would be O(hashents+numents) rather than O(numblks+numents),
* but this accesses memory more sequentially, and when we're
* called, the block is usually pretty full.
*/
for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i);
if (le->le_type != ZAP_CHUNK_ENTRY)
continue;
if (le->le_hash & (1ULL << bit))
zap_leaf_transfer_entry(l, i, nl);
else
(void) zap_leaf_rehash_entry(l, i);
}
}
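A hedged worked example of the prefix arithmetic above (the values are illustrative and not taken from this change): a leaf whose lh_prefix is 0b10 with lh_prefix_len 2 splits as follows.

	uint64_t prefix = 0x2;          /* 0b10: covers hashes whose top two bits are "10" */
	int prefix_len = 2;
	int bit = 64 - 1 - prefix_len;  /* 61: the next hash bit consumed by the split */
	uint64_t old_prefix = (prefix << 1);     /* 0b100 stays with the original leaf */
	uint64_t new_prefix = (prefix << 1) | 1; /* 0b101 goes to the new leaf */
	/* An entry moves to the new leaf iff (le_hash & (1ULL << bit)) is nonzero. */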
void
zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
{
int n = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
zap_leaf_phys(l)->l_hdr.lh_prefix_len;
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_leafs_with_2n_pointers[n]++;
n = zap_leaf_phys(l)->l_hdr.lh_nentries/5;
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_blocks_with_n5_entries[n]++;
n = ((1<<FZAP_BLOCK_SHIFT(zap)) -
zap_leaf_phys(l)->l_hdr.lh_nfree * (ZAP_LEAF_ARRAY_BYTES+1))*10 /
(1<<FZAP_BLOCK_SHIFT(zap));
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_blocks_n_tenths_full[n]++;
for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
int nentries = 0;
int chunk = zap_leaf_phys(l)->l_hash[i];
while (chunk != CHAIN_END) {
struct zap_leaf_entry *le =
ZAP_LEAF_ENTRY(l, chunk);
n = 1 + ZAP_LEAF_ARRAY_NCHUNKS(le->le_name_numints) +
ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints *
le->le_value_intlen);
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_entries_using_n_chunks[n]++;
chunk = le->le_next;
nentries++;
}
n = nentries;
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_buckets_with_n_entries[n]++;
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zap_micro.c b/sys/contrib/openzfs/module/zfs/zap_micro.c
index b4611685b204..1f32e4450522 100644
--- a/sys/contrib/openzfs/module/zfs/zap_micro.c
+++ b/sys/contrib/openzfs/module/zfs/zap_micro.c
@@ -1,1698 +1,1696 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/zap.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
#include <sys/avl.h>
#include <sys/arc.h>
#include <sys/dmu_objset.h>
#ifdef _KERNEL
#include <sys/sunddi.h>
#endif
-extern inline mzap_phys_t *zap_m_phys(zap_t *zap);
-
static int mzap_upgrade(zap_t **zapp,
void *tag, dmu_tx_t *tx, zap_flags_t flags);
uint64_t
zap_getflags(zap_t *zap)
{
if (zap->zap_ismicro)
return (0);
return (zap_f_phys(zap)->zap_flags);
}
int
zap_hashbits(zap_t *zap)
{
if (zap_getflags(zap) & ZAP_FLAG_HASH64)
return (48);
else
return (28);
}
uint32_t
zap_maxcd(zap_t *zap)
{
if (zap_getflags(zap) & ZAP_FLAG_HASH64)
return ((1<<16)-1);
else
return (-1U);
}
static uint64_t
zap_hash(zap_name_t *zn)
{
zap_t *zap = zn->zn_zap;
uint64_t h = 0;
if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
h = *(uint64_t *)zn->zn_key_orig;
} else {
h = zap->zap_salt;
ASSERT(h != 0);
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
const uint64_t *wp = zn->zn_key_norm;
ASSERT(zn->zn_key_intlen == 8);
for (int i = 0; i < zn->zn_key_norm_numints;
wp++, i++) {
uint64_t word = *wp;
for (int j = 0; j < zn->zn_key_intlen; j++) {
h = (h >> 8) ^
zfs_crc64_table[(h ^ word) & 0xFF];
word >>= NBBY;
}
}
} else {
const uint8_t *cp = zn->zn_key_norm;
/*
* We previously stored the terminating null on
* disk, but didn't hash it, so we need to
* continue to not hash it. (The
* zn_key_*_numints includes the terminating
* null for non-binary keys.)
*/
int len = zn->zn_key_norm_numints - 1;
ASSERT(zn->zn_key_intlen == 1);
for (int i = 0; i < len; cp++, i++) {
h = (h >> 8) ^
zfs_crc64_table[(h ^ *cp) & 0xFF];
}
}
}
/*
* Don't use all 64 bits, since we need some in the cookie for
* the collision differentiator. We MUST use the high bits,
* since those are the ones that we first pay attention to when
* choosing the bucket.
*/
h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
return (h);
}
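To make the final masking step concrete, here is a small user-space sketch (not part of this change; the 28- and 48-bit widths mirror zap_hashbits()) showing how the low bits are cleared so they remain available for the collision differentiator:

	#include <stdint.h>

	/* Keep only the top 'hashbits' bits of h, as in the mask above. */
	uint64_t
	zap_hash_mask(uint64_t h, int hashbits)
	{
		return (h & ~((1ULL << (64 - hashbits)) - 1));
	}

	/*
	 * zap_hash_mask(0x0123456789abcdefULL, 28) == 0x0123456000000000ULL
	 * zap_hash_mask(0x0123456789abcdefULL, 48) == 0x0123456789ab0000ULL
	 */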
static int
zap_normalize(zap_t *zap, const char *name, char *namenorm, int normflags)
{
ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));
size_t inlen = strlen(name) + 1;
size_t outlen = ZAP_MAXNAMELEN;
int err = 0;
(void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
normflags | U8_TEXTPREP_IGNORE_NULL | U8_TEXTPREP_IGNORE_INVALID,
U8_UNICODE_LATEST, &err);
return (err);
}
boolean_t
zap_match(zap_name_t *zn, const char *matchname)
{
ASSERT(!(zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY));
if (zn->zn_matchtype & MT_NORMALIZE) {
char norm[ZAP_MAXNAMELEN];
if (zap_normalize(zn->zn_zap, matchname, norm,
zn->zn_normflags) != 0)
return (B_FALSE);
return (strcmp(zn->zn_key_norm, norm) == 0);
} else {
return (strcmp(zn->zn_key_orig, matchname) == 0);
}
}
void
zap_name_free(zap_name_t *zn)
{
kmem_free(zn, sizeof (zap_name_t));
}
zap_name_t *
zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
{
zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
zn->zn_zap = zap;
zn->zn_key_intlen = sizeof (*key);
zn->zn_key_orig = key;
zn->zn_key_orig_numints = strlen(zn->zn_key_orig) + 1;
zn->zn_matchtype = mt;
zn->zn_normflags = zap->zap_normflags;
/*
* If we're dealing with a case-sensitive lookup on a mixed or
* insensitive fs, remove U8_TEXTPREP_TOUPPER, or the lookup
* will fold case to all caps and override the lookup request.
*/
if (mt & MT_MATCH_CASE)
zn->zn_normflags &= ~U8_TEXTPREP_TOUPPER;
if (zap->zap_normflags) {
/*
* We *must* use zap_normflags because this normalization is
* what the hash is computed from.
*/
if (zap_normalize(zap, key, zn->zn_normbuf,
zap->zap_normflags) != 0) {
zap_name_free(zn);
return (NULL);
}
zn->zn_key_norm = zn->zn_normbuf;
zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
} else {
if (mt != 0) {
zap_name_free(zn);
return (NULL);
}
zn->zn_key_norm = zn->zn_key_orig;
zn->zn_key_norm_numints = zn->zn_key_orig_numints;
}
zn->zn_hash = zap_hash(zn);
if (zap->zap_normflags != zn->zn_normflags) {
/*
* We *must* use zn_normflags because this normalization is
* what the matching is based on. (Not the hash!)
*/
if (zap_normalize(zap, key, zn->zn_normbuf,
zn->zn_normflags) != 0) {
zap_name_free(zn);
return (NULL);
}
zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
}
return (zn);
}
static zap_name_t *
zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
ASSERT(zap->zap_normflags == 0);
zn->zn_zap = zap;
zn->zn_key_intlen = sizeof (*key);
zn->zn_key_orig = zn->zn_key_norm = key;
zn->zn_key_orig_numints = zn->zn_key_norm_numints = numints;
zn->zn_matchtype = 0;
zn->zn_hash = zap_hash(zn);
return (zn);
}
static void
mzap_byteswap(mzap_phys_t *buf, size_t size)
{
buf->mz_block_type = BSWAP_64(buf->mz_block_type);
buf->mz_salt = BSWAP_64(buf->mz_salt);
buf->mz_normflags = BSWAP_64(buf->mz_normflags);
int max = (size / MZAP_ENT_LEN) - 1;
for (int i = 0; i < max; i++) {
buf->mz_chunk[i].mze_value =
BSWAP_64(buf->mz_chunk[i].mze_value);
buf->mz_chunk[i].mze_cd =
BSWAP_32(buf->mz_chunk[i].mze_cd);
}
}
void
zap_byteswap(void *buf, size_t size)
{
uint64_t block_type = *(uint64_t *)buf;
if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
/* ASSERT(magic == ZAP_LEAF_MAGIC); */
mzap_byteswap(buf, size);
} else {
fzap_byteswap(buf, size);
}
}
static int
mze_compare(const void *arg1, const void *arg2)
{
const mzap_ent_t *mze1 = arg1;
const mzap_ent_t *mze2 = arg2;
int cmp = TREE_CMP(mze1->mze_hash, mze2->mze_hash);
if (likely(cmp))
return (cmp);
return (TREE_CMP(mze1->mze_cd, mze2->mze_cd));
}
static void
mze_insert(zap_t *zap, int chunkid, uint64_t hash)
{
ASSERT(zap->zap_ismicro);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
mzap_ent_t *mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
mze->mze_chunkid = chunkid;
mze->mze_hash = hash;
mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
ASSERT(MZE_PHYS(zap, mze)->mze_name[0] != 0);
avl_add(&zap->zap_m.zap_avl, mze);
}
static mzap_ent_t *
mze_find(zap_name_t *zn)
{
mzap_ent_t mze_tofind;
mzap_ent_t *mze;
avl_index_t idx;
avl_tree_t *avl = &zn->zn_zap->zap_m.zap_avl;
ASSERT(zn->zn_zap->zap_ismicro);
ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock));
mze_tofind.mze_hash = zn->zn_hash;
mze_tofind.mze_cd = 0;
mze = avl_find(avl, &mze_tofind, &idx);
if (mze == NULL)
mze = avl_nearest(avl, idx, AVL_AFTER);
for (; mze && mze->mze_hash == zn->zn_hash; mze = AVL_NEXT(avl, mze)) {
ASSERT3U(mze->mze_cd, ==, MZE_PHYS(zn->zn_zap, mze)->mze_cd);
if (zap_match(zn, MZE_PHYS(zn->zn_zap, mze)->mze_name))
return (mze);
}
return (NULL);
}
static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
mzap_ent_t mze_tofind;
avl_index_t idx;
avl_tree_t *avl = &zap->zap_m.zap_avl;
ASSERT(zap->zap_ismicro);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
mze_tofind.mze_hash = hash;
mze_tofind.mze_cd = 0;
uint32_t cd = 0;
for (mzap_ent_t *mze = avl_find(avl, &mze_tofind, &idx);
mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
if (mze->mze_cd != cd)
break;
cd++;
}
return (cd);
}
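As a hedged worked example of the scan above (the cd values are hypothetical): if the entries colliding on a given hash currently hold cds 0, 1, and 3, the loop advances past the first two, stops when it sees 3 != 2, and returns 2, the lowest unused cd.

	/* Hypothetical cds already in use for this hash, in the order the AVL yields them. */
	uint32_t used[] = { 0, 1, 3 };
	uint32_t cd = 0;
	for (size_t i = 0; i < sizeof (used) / sizeof (used[0]); i++) {
		if (used[i] != cd)
			break;
		cd++;
	}
	/* cd == 2: the lowest collision differentiator not yet in use. */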
/*
* Each mzap entry requires at most 4 chunks:
* 3 chunks for the name + 1 chunk for the value.
*/
#define MZAP_ENT_CHUNKS (1 + ZAP_LEAF_ARRAY_NCHUNKS(MZAP_NAME_LEN) + \
ZAP_LEAF_ARRAY_NCHUNKS(sizeof (uint64_t)))
/*
* Check whether all entries colliding on this hash, including the one
* being added, would still fit within a single fatzap leaf.
*/
static boolean_t
mze_canfit_fzap_leaf(zap_name_t *zn, uint64_t hash)
{
zap_t *zap = zn->zn_zap;
mzap_ent_t mze_tofind;
mzap_ent_t *mze;
avl_index_t idx;
avl_tree_t *avl = &zap->zap_m.zap_avl;
uint32_t mzap_ents = 0;
mze_tofind.mze_hash = hash;
mze_tofind.mze_cd = 0;
for (mze = avl_find(avl, &mze_tofind, &idx);
mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
mzap_ents++;
}
/* Include the new entry being added */
mzap_ents++;
return (ZAP_LEAF_NUMCHUNKS_DEF > (mzap_ents * MZAP_ENT_CHUNKS));
}
static void
mze_remove(zap_t *zap, mzap_ent_t *mze)
{
ASSERT(zap->zap_ismicro);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
avl_remove(&zap->zap_m.zap_avl, mze);
kmem_free(mze, sizeof (mzap_ent_t));
}
static void
mze_destroy(zap_t *zap)
{
mzap_ent_t *mze;
void *avlcookie = NULL;
while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)))
kmem_free(mze, sizeof (mzap_ent_t));
avl_destroy(&zap->zap_m.zap_avl);
}
static zap_t *
mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
{
zap_t *winner;
uint64_t *zap_hdr = (uint64_t *)db->db_data;
uint64_t zap_block_type = zap_hdr[0];
uint64_t zap_magic = zap_hdr[1];
ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
zap_t *zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&zap->zap_rwlock, RW_WRITER);
zap->zap_objset = os;
zap->zap_object = obj;
zap->zap_dbuf = db;
if (zap_block_type != ZBT_MICRO) {
mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, MUTEX_DEFAULT,
0);
zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
if (zap_block_type != ZBT_HEADER || zap_magic != ZAP_MAGIC) {
winner = NULL; /* No actual winner here... */
goto handle_winner;
}
} else {
zap->zap_ismicro = TRUE;
}
/*
* Make sure that zap_ismicro is set before we let others see
* it, because zap_lockdir() checks zap_ismicro without the lock
* held.
*/
dmu_buf_init_user(&zap->zap_dbu, zap_evict_sync, NULL, &zap->zap_dbuf);
winner = dmu_buf_set_user(db, &zap->zap_dbu);
if (winner != NULL)
goto handle_winner;
if (zap->zap_ismicro) {
zap->zap_salt = zap_m_phys(zap)->mz_salt;
zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
avl_create(&zap->zap_m.zap_avl, mze_compare,
sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));
for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze =
&zap_m_phys(zap)->mz_chunk[i];
if (mze->mze_name[0]) {
zap_name_t *zn;
zap->zap_m.zap_num_entries++;
zn = zap_name_alloc(zap, mze->mze_name, 0);
mze_insert(zap, i, zn->zn_hash);
zap_name_free(zn);
}
}
} else {
zap->zap_salt = zap_f_phys(zap)->zap_salt;
zap->zap_normflags = zap_f_phys(zap)->zap_normflags;
ASSERT3U(sizeof (struct zap_leaf_header), ==,
2*ZAP_LEAF_CHUNKSIZE);
/*
* The embedded pointer table should not overlap the
* other members.
*/
ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
&zap_f_phys(zap)->zap_salt);
/*
* The embedded pointer table should end at the end of
* the block.
*/
ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
(uintptr_t)zap_f_phys(zap), ==,
zap->zap_dbuf->db_size);
}
rw_exit(&zap->zap_rwlock);
return (zap);
handle_winner:
rw_exit(&zap->zap_rwlock);
rw_destroy(&zap->zap_rwlock);
if (!zap->zap_ismicro)
mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
kmem_free(zap, sizeof (zap_t));
return (winner);
}
/*
* This routine "consumes" the caller's hold on the dbuf, which must
* have the specified tag.
*/
static int
zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
{
ASSERT0(db->db_offset);
objset_t *os = dmu_buf_get_objset(db);
uint64_t obj = db->db_object;
dmu_object_info_t doi;
*zapp = NULL;
dmu_object_info_from_db(db, &doi);
if (DMU_OT_BYTESWAP(doi.doi_type) != DMU_BSWAP_ZAP)
return (SET_ERROR(EINVAL));
zap_t *zap = dmu_buf_get_user(db);
if (zap == NULL) {
zap = mzap_open(os, obj, db);
if (zap == NULL) {
/*
* mzap_open() didn't like what it saw on-disk.
* Check for corruption!
*/
return (SET_ERROR(EIO));
}
}
/*
* We're checking zap_ismicro without the lock held, in order to
* tell what type of lock we want. Once we have some sort of
* lock, see if it really is the right type. In practice this
* can only be different if it was upgraded from micro to fat,
* and micro wanted WRITER but fat only needs READER.
*/
krw_t lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
rw_enter(&zap->zap_rwlock, lt);
if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
/* it was upgraded, now we only need reader */
ASSERT(lt == RW_WRITER);
ASSERT(RW_READER ==
((!zap->zap_ismicro && fatreader) ? RW_READER : lti));
rw_downgrade(&zap->zap_rwlock);
lt = RW_READER;
}
zap->zap_objset = os;
if (lt == RW_WRITER)
dmu_buf_will_dirty(db, tx);
ASSERT3P(zap->zap_dbuf, ==, db);
ASSERT(!zap->zap_ismicro ||
zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
if (zap->zap_ismicro && tx && adding &&
zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
if (newsz > MZAP_MAX_BLKSZ) {
dprintf("upgrading obj %llu: num_entries=%u\n",
(u_longlong_t)obj, zap->zap_m.zap_num_entries);
*zapp = zap;
int err = mzap_upgrade(zapp, tag, tx, 0);
if (err != 0)
rw_exit(&zap->zap_rwlock);
return (err);
}
VERIFY0(dmu_object_set_blocksize(os, obj, newsz, 0, tx));
zap->zap_m.zap_num_chunks =
db->db_size / MZAP_ENT_LEN - 1;
}
*zapp = zap;
return (0);
}
static int
zap_lockdir_by_dnode(dnode_t *dn, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
dmu_buf_t *db;
int err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
if (err != 0) {
return (err);
}
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
if (err != 0) {
dmu_buf_rele(db, tag);
}
return (err);
}
int
zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
dmu_buf_t *db;
int err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
if (err != 0)
return (err);
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
if (err != 0)
dmu_buf_rele(db, tag);
return (err);
}
void
zap_unlockdir(zap_t *zap, void *tag)
{
rw_exit(&zap->zap_rwlock);
dmu_buf_rele(zap->zap_dbuf, tag);
}
static int
mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
{
int err = 0;
zap_t *zap = *zapp;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
int sz = zap->zap_dbuf->db_size;
mzap_phys_t *mzp = vmem_alloc(sz, KM_SLEEP);
bcopy(zap->zap_dbuf->db_data, mzp, sz);
int nchunks = zap->zap_m.zap_num_chunks;
if (!flags) {
err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
1ULL << fzap_default_block_shift, 0, tx);
if (err != 0) {
vmem_free(mzp, sz);
return (err);
}
}
dprintf("upgrading obj=%llu with %u chunks\n",
(u_longlong_t)zap->zap_object, nchunks);
/* XXX destroy the avl later, so we can use the stored hash value */
mze_destroy(zap);
fzap_upgrade(zap, tx, flags);
for (int i = 0; i < nchunks; i++) {
mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
if (mze->mze_name[0] == 0)
continue;
dprintf("adding %s=%llu\n",
mze->mze_name, (u_longlong_t)mze->mze_value);
zap_name_t *zn = zap_name_alloc(zap, mze->mze_name, 0);
/* If we fail here, we would end up losing entries */
VERIFY0(fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
tag, tx));
zap = zn->zn_zap; /* fzap_add_cd() may change zap */
zap_name_free(zn);
}
vmem_free(mzp, sz);
*zapp = zap;
return (0);
}
/*
* The "normflags" determine the behavior of the matchtype_t which is
* passed to zap_lookup_norm(). Names which have the same normalized
* version will be stored with the same hash value, and therefore we can
* perform normalization-insensitive lookups. We can be Unicode form-
* insensitive and/or case-insensitive. The following flags are valid for
* "normflags":
*
* U8_TEXTPREP_NFC
* U8_TEXTPREP_NFD
* U8_TEXTPREP_NFKC
* U8_TEXTPREP_NFKD
* U8_TEXTPREP_TOUPPER
*
* The *_NF* (Normalization Form) flags are mutually exclusive; at most one
* of them may be supplied.
*/
void
mzap_create_impl(dnode_t *dn, int normflags, zap_flags_t flags, dmu_tx_t *tx)
{
dmu_buf_t *db;
VERIFY0(dmu_buf_hold_by_dnode(dn, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db, tx);
mzap_phys_t *zp = db->db_data;
zp->mz_block_type = ZBT_MICRO;
zp->mz_salt =
((uintptr_t)db ^ (uintptr_t)tx ^ (dn->dn_object << 1)) | 1ULL;
zp->mz_normflags = normflags;
if (flags != 0) {
zap_t *zap;
/* Only fat zap supports flags; upgrade immediately. */
VERIFY0(zap_lockdir_impl(db, FTAG, tx, RW_WRITER,
B_FALSE, B_FALSE, &zap));
VERIFY0(mzap_upgrade(&zap, FTAG, tx, flags));
zap_unlockdir(zap, FTAG);
} else {
dmu_buf_rele(db, FTAG);
}
}
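A hedged usage sketch of the flags documented above (kernel context assumed; os, tx, and the object types are placeholders rather than values taken from this change): creating a case-insensitive ZAP and performing a normalization-aware lookup.

	/* Hash names after folding to upper case, so "ReadMe" and "README" collide. */
	uint64_t obj = zap_create_norm(os, U8_TEXTPREP_TOUPPER,
	    DMU_OT_DIRECTORY_CONTENTS, DMU_OT_NONE, 0, tx);

	char realname[ZAP_MAXNAMELEN];
	boolean_t conflict;
	uint64_t value;
	/* MT_NORMALIZE matches case-insensitively; realname gets the stored spelling. */
	int err = zap_lookup_norm(os, obj, "ReadMe.TXT", sizeof (uint64_t), 1,
	    &value, MT_NORMALIZE, realname, sizeof (realname), &conflict);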
static uint64_t
zap_create_impl(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, int dnodesize,
dnode_t **allocated_dnode, void *tag, dmu_tx_t *tx)
{
uint64_t obj;
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
if (allocated_dnode == NULL) {
dnode_t *dn;
obj = dmu_object_alloc_hold(os, ot, 1ULL << leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize,
&dn, FTAG, tx);
mzap_create_impl(dn, normflags, flags, tx);
dnode_rele(dn, FTAG);
} else {
obj = dmu_object_alloc_hold(os, ot, 1ULL << leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize,
allocated_dnode, tag, tx);
mzap_create_impl(*allocated_dnode, normflags, flags, tx);
}
return (obj);
}
int
zap_create_claim(objset_t *os, uint64_t obj, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_claim_dnsize(os, obj, ot, bonustype, bonuslen,
0, tx));
}
int
zap_create_claim_dnsize(objset_t *os, uint64_t obj, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_claim_norm_dnsize(os, obj,
0, ot, bonustype, bonuslen, dnodesize, tx));
}
int
zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_claim_norm_dnsize(os, obj, normflags, ot, bonustype,
bonuslen, 0, tx));
}
int
zap_create_claim_norm_dnsize(objset_t *os, uint64_t obj, int normflags,
dmu_object_type_t ot, dmu_object_type_t bonustype, int bonuslen,
int dnodesize, dmu_tx_t *tx)
{
dnode_t *dn;
int error;
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
error = dmu_object_claim_dnsize(os, obj, ot, 0, bonustype, bonuslen,
dnodesize, tx);
if (error != 0)
return (error);
error = dnode_hold(os, obj, FTAG, &dn);
if (error != 0)
return (error);
mzap_create_impl(dn, normflags, 0, tx);
dnode_rele(dn, FTAG);
return (0);
}
uint64_t
zap_create(objset_t *os, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx));
}
uint64_t
zap_create_dnsize(objset_t *os, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_norm_dnsize(os, 0, ot, bonustype, bonuslen,
dnodesize, tx));
}
uint64_t
zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_norm_dnsize(os, normflags, ot, bonustype, bonuslen,
0, tx));
}
uint64_t
zap_create_norm_dnsize(objset_t *os, int normflags, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_impl(os, normflags, 0, ot, 0, 0,
bonustype, bonuslen, dnodesize, NULL, NULL, tx));
}
uint64_t
zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
return (zap_create_flags_dnsize(os, normflags, flags, ot,
leaf_blockshift, indirect_blockshift, bonustype, bonuslen, 0, tx));
}
uint64_t
zap_create_flags_dnsize(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
{
return (zap_create_impl(os, normflags, flags, ot, leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize, NULL, NULL,
tx));
}
/*
* Create a zap object and return a pointer to the newly allocated dnode via
* the allocated_dnode argument. The returned dnode will be held and the
* caller is responsible for releasing the hold by calling dnode_rele().
*/
uint64_t
zap_create_hold(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, int dnodesize,
dnode_t **allocated_dnode, void *tag, dmu_tx_t *tx)
{
return (zap_create_impl(os, normflags, flags, ot, leaf_blockshift,
indirect_blockshift, bonustype, bonuslen, dnodesize,
allocated_dnode, tag, tx));
}
int
zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
{
/*
* dmu_object_free will free the object number and free the
* data. Freeing the data will cause our pageout function to be
* called, which will destroy our data (zap_leaf_t's and zap_t).
*/
return (dmu_object_free(os, zapobj, tx));
}
void
zap_evict_sync(void *dbu)
{
zap_t *zap = dbu;
rw_destroy(&zap->zap_rwlock);
if (zap->zap_ismicro)
mze_destroy(zap);
else
mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
kmem_free(zap, sizeof (zap_t));
}
int
zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
if (!zap->zap_ismicro) {
err = fzap_count(zap, count);
} else {
*count = zap->zap_m.zap_num_entries;
}
zap_unlockdir(zap, FTAG);
return (err);
}
/*
* zn may be NULL; if not specified, it will be computed if needed.
* See also the comment above zap_entry_normalization_conflict().
*/
static boolean_t
mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
{
int direction = AVL_BEFORE;
boolean_t allocdzn = B_FALSE;
if (zap->zap_normflags == 0)
return (B_FALSE);
again:
for (mzap_ent_t *other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
other && other->mze_hash == mze->mze_hash;
other = avl_walk(&zap->zap_m.zap_avl, other, direction)) {
if (zn == NULL) {
zn = zap_name_alloc(zap, MZE_PHYS(zap, mze)->mze_name,
MT_NORMALIZE);
allocdzn = B_TRUE;
}
if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
if (allocdzn)
zap_name_free(zn);
return (B_TRUE);
}
}
if (direction == AVL_BEFORE) {
direction = AVL_AFTER;
goto again;
}
if (allocdzn)
zap_name_free(zn);
return (B_FALSE);
}
/*
* Routines for manipulating attributes.
*/
int
zap_lookup(objset_t *os, uint64_t zapobj, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf)
{
return (zap_lookup_norm(os, zapobj, name, integer_size,
num_integers, buf, 0, NULL, 0, NULL));
}
static int
zap_lookup_impl(zap_t *zap, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf,
matchtype_t mt, char *realname, int rn_len,
boolean_t *ncp)
{
int err = 0;
zap_name_t *zn = zap_name_alloc(zap, name, mt);
if (zn == NULL)
return (SET_ERROR(ENOTSUP));
if (!zap->zap_ismicro) {
err = fzap_lookup(zn, integer_size, num_integers, buf,
realname, rn_len, ncp);
} else {
mzap_ent_t *mze = mze_find(zn);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
if (num_integers < 1) {
err = SET_ERROR(EOVERFLOW);
} else if (integer_size != 8) {
err = SET_ERROR(EINVAL);
} else {
*(uint64_t *)buf =
MZE_PHYS(zap, mze)->mze_value;
(void) strlcpy(realname,
MZE_PHYS(zap, mze)->mze_name, rn_len);
if (ncp) {
*ncp = mzap_normalization_conflict(zap,
zn, mze);
}
}
}
}
zap_name_free(zn);
return (err);
}
int
zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf,
matchtype_t mt, char *realname, int rn_len,
boolean_t *ncp)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_lookup_impl(zap, name, integer_size,
num_integers, buf, mt, realname, rn_len, ncp);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_prefetch(objset_t *os, uint64_t zapobj, const char *name)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
return (err);
zn = zap_name_alloc(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
fzap_prefetch(zn);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_lookup_by_dnode(dnode_t *dn, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf)
{
return (zap_lookup_norm_by_dnode(dn, name, integer_size,
num_integers, buf, 0, NULL, 0, NULL));
}
int
zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
uint64_t integer_size, uint64_t num_integers, void *buf,
matchtype_t mt, char *realname, int rn_len,
boolean_t *ncp)
{
zap_t *zap;
int err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
FTAG, &zap);
if (err != 0)
return (err);
err = zap_lookup_impl(zap, name, integer_size,
num_integers, buf, mt, realname, rn_len, ncp);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
fzap_prefetch(zn);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_lookup(zn, integer_size, num_integers, buf,
NULL, 0, NULL);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_contains(objset_t *os, uint64_t zapobj, const char *name)
{
int err = zap_lookup_norm(os, zapobj, name, 0,
0, NULL, 0, NULL, 0, NULL);
if (err == EOVERFLOW || err == EINVAL)
err = 0; /* found, but skipped reading the value */
return (err);
}
int
zap_length(objset_t *os, uint64_t zapobj, const char *name,
uint64_t *integer_size, uint64_t *num_integers)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_length(zn, integer_size, num_integers);
} else {
mzap_ent_t *mze = mze_find(zn);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
if (integer_size)
*integer_size = 8;
if (num_integers)
*num_integers = 1;
}
}
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t *integer_size, uint64_t *num_integers)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_length(zn, integer_size, num_integers);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
static void
mzap_addent(zap_name_t *zn, uint64_t value)
{
zap_t *zap = zn->zn_zap;
int start = zap->zap_m.zap_alloc_next;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
#ifdef ZFS_DEBUG
for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
}
#endif
uint32_t cd = mze_find_unused_cd(zap, zn->zn_hash);
/* given the limited size of the microzap, this can't happen */
ASSERT(cd < zap_maxcd(zap));
again:
for (int i = start; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
if (mze->mze_name[0] == 0) {
mze->mze_value = value;
mze->mze_cd = cd;
(void) strlcpy(mze->mze_name, zn->zn_key_orig,
sizeof (mze->mze_name));
zap->zap_m.zap_num_entries++;
zap->zap_m.zap_alloc_next = i+1;
if (zap->zap_m.zap_alloc_next ==
zap->zap_m.zap_num_chunks)
zap->zap_m.zap_alloc_next = 0;
mze_insert(zap, i, zn->zn_hash);
return;
}
}
if (start != 0) {
start = 0;
goto again;
}
cmn_err(CE_PANIC, "out of entries!");
}
static int
zap_add_impl(zap_t *zap, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx, void *tag)
{
const uint64_t *intval = val;
int err = 0;
zap_name_t *zn = zap_name_alloc(zap, key, 0);
if (zn == NULL) {
zap_unlockdir(zap, tag);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
} else if (integer_size != 8 || num_integers != 1 ||
strlen(key) >= MZAP_NAME_LEN ||
!mze_canfit_fzap_leaf(zn, zn->zn_hash)) {
err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
if (err == 0) {
err = fzap_add(zn, integer_size, num_integers, val,
tag, tx);
}
zap = zn->zn_zap; /* fzap_add() may change zap */
} else {
if (mze_find(zn) != NULL) {
err = SET_ERROR(EEXIST);
} else {
mzap_addent(zn, *intval);
}
}
ASSERT(zap == zn->zn_zap);
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_add() failed */
zap_unlockdir(zap, tag);
return (err);
}
int
zap_add(objset_t *os, uint64_t zapobj, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
/* zap_add_impl() calls zap_unlockdir() */
return (err);
}
int
zap_add_by_dnode(dnode_t *dn, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
/* zap_add_impl() calls zap_unlockdir() */
return (err);
}
int
zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_add(zn, integer_size, num_integers, val, FTAG, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_add() failed */
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_update(objset_t *os, uint64_t zapobj, const char *name,
int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
zap_t *zap;
const uint64_t *intval = val;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_update(zn, integer_size, num_integers, val,
FTAG, tx);
zap = zn->zn_zap; /* fzap_update() may change zap */
} else if (integer_size != 8 || num_integers != 1 ||
strlen(name) >= MZAP_NAME_LEN) {
dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
(u_longlong_t)zapobj, integer_size,
(u_longlong_t)num_integers, name);
err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
if (err == 0) {
err = fzap_update(zn, integer_size, num_integers,
val, FTAG, tx);
}
zap = zn->zn_zap; /* fzap_update() may change zap */
} else {
mzap_ent_t *mze = mze_find(zn);
if (mze != NULL) {
MZE_PHYS(zap, mze)->mze_value = *intval;
} else {
mzap_addent(zn, *intval);
}
}
ASSERT(zap == zn->zn_zap);
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
zap_unlockdir(zap, FTAG);
return (err);
}
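A hedged usage sketch showing how integer_size and num_integers are typically chosen (os, zapobj, and tx are placeholders, not values taken from this change): a single 8-byte value versus a string stored as an array of 1-byte integers.

	/* One 64-bit integer: integer_size = 8, num_integers = 1. */
	uint64_t refs = 42;
	VERIFY0(zap_update(os, zapobj, "refcount", sizeof (uint64_t), 1, &refs, tx));

	/* A string: integer_size = 1, num_integers = strlen() + 1 to keep the NUL. */
	const char *msg = "hello";
	VERIFY0(zap_update(os, zapobj, "greeting", 1, strlen(msg) + 1, msg, tx));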
int
zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints,
int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_update(zn, integer_size, num_integers, val, FTAG, tx);
zap = zn->zn_zap; /* fzap_update() may change zap */
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
{
return (zap_remove_norm(os, zapobj, name, 0, tx));
}
static int
zap_remove_impl(zap_t *zap, const char *name,
matchtype_t mt, dmu_tx_t *tx)
{
int err = 0;
zap_name_t *zn = zap_name_alloc(zap, name, mt);
if (zn == NULL)
return (SET_ERROR(ENOTSUP));
if (!zap->zap_ismicro) {
err = fzap_remove(zn, tx);
} else {
mzap_ent_t *mze = mze_find(zn);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
zap->zap_m.zap_num_entries--;
bzero(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid],
sizeof (mzap_ent_phys_t));
mze_remove(zap, mze);
}
}
zap_name_free(zn);
return (err);
}
int
zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
matchtype_t mt, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err)
return (err);
err = zap_remove_impl(zap, name, mt, tx);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx)
{
zap_t *zap;
int err;
err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err)
return (err);
err = zap_remove_impl(zap, name, 0, tx);
zap_unlockdir(zap, FTAG);
return (err);
}
int
zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, dmu_tx_t *tx)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
}
err = fzap_remove(zn, tx);
zap_name_free(zn);
zap_unlockdir(zap, FTAG);
return (err);
}
/*
* Routines for iterating over the attributes.
*/
static void
zap_cursor_init_impl(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
uint64_t serialized, boolean_t prefetch)
{
zc->zc_objset = os;
zc->zc_zap = NULL;
zc->zc_leaf = NULL;
zc->zc_zapobj = zapobj;
zc->zc_serialized = serialized;
zc->zc_hash = 0;
zc->zc_cd = 0;
zc->zc_prefetch = prefetch;
}
void
zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
uint64_t serialized)
{
zap_cursor_init_impl(zc, os, zapobj, serialized, B_TRUE);
}
/*
* Initialize a cursor at the beginning of the ZAP object. The entire
* ZAP object will be prefetched.
*/
void
zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
zap_cursor_init_impl(zc, os, zapobj, 0, B_TRUE);
}
/*
* Initialize a cursor at the beginning, but request that we not prefetch
* the entire ZAP object.
*/
void
zap_cursor_init_noprefetch(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
zap_cursor_init_impl(zc, os, zapobj, 0, B_FALSE);
}
void
zap_cursor_fini(zap_cursor_t *zc)
{
if (zc->zc_zap) {
rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
zap_unlockdir(zc->zc_zap, NULL);
zc->zc_zap = NULL;
}
if (zc->zc_leaf) {
rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
zap_put_leaf(zc->zc_leaf);
zc->zc_leaf = NULL;
}
zc->zc_objset = NULL;
}
uint64_t
zap_cursor_serialize(zap_cursor_t *zc)
{
if (zc->zc_hash == -1ULL)
return (-1ULL);
if (zc->zc_zap == NULL)
return (zc->zc_serialized);
ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
/*
* We want to keep the high 32 bits of the cursor zero if we can, so
* that 32-bit programs can access this. So we usually use a small
* (28-bit) hash value, which lets us fit 4 bits of cd into the low
* 32 bits of the cursor.
*
* [ collision differentiator | zap_hashbits()-bit hash value ]
*/
return ((zc->zc_hash >> (64 - zap_hashbits(zc->zc_zap))) |
((uint64_t)zc->zc_cd << zap_hashbits(zc->zc_zap)));
}
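A hedged user-space round-trip of the cookie layout described above (HASHBITS fixed at 28 for illustration, mirroring zap_hashbits() without ZAP_FLAG_HASH64):

	#include <stdint.h>

	#define	HASHBITS	28	/* illustrative; 48 with ZAP_FLAG_HASH64 */

	/*
	 * serialize:   cookie = [ cd | HASHBITS-bit hash ]
	 * deserialize: recover hash and cd as zap_cursor_retrieve() does.
	 */
	int
	cursor_cookie_roundtrip(void)
	{
		uint64_t hash = 0xabcdef1ULL << (64 - HASHBITS); /* only high bits set */
		uint64_t cd = 3;
		uint64_t cookie = (hash >> (64 - HASHBITS)) | (cd << HASHBITS);
		uint64_t hash2 = cookie << (64 - HASHBITS);
		uint64_t cd2 = cookie >> HASHBITS;
		return (hash2 == hash && cd2 == cd);	/* 1: lossless round trip */
	}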
int
zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
{
int err;
if (zc->zc_hash == -1ULL)
return (SET_ERROR(ENOENT));
if (zc->zc_zap == NULL) {
int hb;
err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
RW_READER, TRUE, FALSE, NULL, &zc->zc_zap);
if (err != 0)
return (err);
/*
* To support zap_cursor_init_serialized, advance, retrieve,
* we must add to the existing zc_cd, which may already
* be 1 due to the zap_cursor_advance.
*/
ASSERT(zc->zc_hash == 0);
hb = zap_hashbits(zc->zc_zap);
zc->zc_hash = zc->zc_serialized << (64 - hb);
zc->zc_cd += zc->zc_serialized >> hb;
if (zc->zc_cd >= zap_maxcd(zc->zc_zap)) /* corrupt serialized */
zc->zc_cd = 0;
} else {
rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
}
if (!zc->zc_zap->zap_ismicro) {
err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
} else {
avl_index_t idx;
mzap_ent_t mze_tofind;
mze_tofind.mze_hash = zc->zc_hash;
mze_tofind.mze_cd = zc->zc_cd;
mzap_ent_t *mze =
avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
if (mze == NULL) {
mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl,
idx, AVL_AFTER);
}
if (mze) {
mzap_ent_phys_t *mzep = MZE_PHYS(zc->zc_zap, mze);
ASSERT3U(mze->mze_cd, ==, mzep->mze_cd);
za->za_normalization_conflict =
mzap_normalization_conflict(zc->zc_zap, NULL, mze);
za->za_integer_length = 8;
za->za_num_integers = 1;
za->za_first_integer = mzep->mze_value;
(void) strlcpy(za->za_name, mzep->mze_name,
sizeof (za->za_name));
zc->zc_hash = mze->mze_hash;
zc->zc_cd = mze->mze_cd;
err = 0;
} else {
zc->zc_hash = -1ULL;
err = SET_ERROR(ENOENT);
}
}
rw_exit(&zc->zc_zap->zap_rwlock);
return (err);
}
void
zap_cursor_advance(zap_cursor_t *zc)
{
if (zc->zc_hash == -1ULL)
return;
zc->zc_cd++;
}
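Taken together, the cursor routines above are normally used in the following hedged iteration sketch (kernel context assumed; os and zapobj are placeholders):

	zap_cursor_t zc;
	zap_attribute_t za;

	for (zap_cursor_init(&zc, os, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		/* za.za_name, za.za_integer_length, za.za_first_integer, ... */
	}
	zap_cursor_fini(&zc);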
int
zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
{
zap_t *zap;
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
bzero(zs, sizeof (zap_stats_t));
if (zap->zap_ismicro) {
zs->zs_blocksize = zap->zap_dbuf->db_size;
zs->zs_num_entries = zap->zap_m.zap_num_entries;
zs->zs_num_blocks = 1;
} else {
fzap_get_stats(zap, zs);
}
zap_unlockdir(zap, FTAG);
return (0);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zap_create);
EXPORT_SYMBOL(zap_create_dnsize);
EXPORT_SYMBOL(zap_create_norm);
EXPORT_SYMBOL(zap_create_norm_dnsize);
EXPORT_SYMBOL(zap_create_flags);
EXPORT_SYMBOL(zap_create_flags_dnsize);
EXPORT_SYMBOL(zap_create_claim);
EXPORT_SYMBOL(zap_create_claim_norm);
EXPORT_SYMBOL(zap_create_claim_norm_dnsize);
EXPORT_SYMBOL(zap_create_hold);
EXPORT_SYMBOL(zap_destroy);
EXPORT_SYMBOL(zap_lookup);
EXPORT_SYMBOL(zap_lookup_by_dnode);
EXPORT_SYMBOL(zap_lookup_norm);
EXPORT_SYMBOL(zap_lookup_uint64);
EXPORT_SYMBOL(zap_contains);
EXPORT_SYMBOL(zap_prefetch);
EXPORT_SYMBOL(zap_prefetch_uint64);
EXPORT_SYMBOL(zap_add);
EXPORT_SYMBOL(zap_add_by_dnode);
EXPORT_SYMBOL(zap_add_uint64);
EXPORT_SYMBOL(zap_update);
EXPORT_SYMBOL(zap_update_uint64);
EXPORT_SYMBOL(zap_length);
EXPORT_SYMBOL(zap_length_uint64);
EXPORT_SYMBOL(zap_remove);
EXPORT_SYMBOL(zap_remove_by_dnode);
EXPORT_SYMBOL(zap_remove_norm);
EXPORT_SYMBOL(zap_remove_uint64);
EXPORT_SYMBOL(zap_count);
EXPORT_SYMBOL(zap_value_search);
EXPORT_SYMBOL(zap_join);
EXPORT_SYMBOL(zap_join_increment);
EXPORT_SYMBOL(zap_add_int);
EXPORT_SYMBOL(zap_remove_int);
EXPORT_SYMBOL(zap_lookup_int);
EXPORT_SYMBOL(zap_increment_int);
EXPORT_SYMBOL(zap_add_int_key);
EXPORT_SYMBOL(zap_lookup_int_key);
EXPORT_SYMBOL(zap_increment);
EXPORT_SYMBOL(zap_cursor_init);
EXPORT_SYMBOL(zap_cursor_fini);
EXPORT_SYMBOL(zap_cursor_retrieve);
EXPORT_SYMBOL(zap_cursor_advance);
EXPORT_SYMBOL(zap_cursor_serialize);
EXPORT_SYMBOL(zap_cursor_init_serialized);
EXPORT_SYMBOL(zap_get_stats);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/zcp.c b/sys/contrib/openzfs/module/zfs/zcp.c
index f724b44baf1d..f200b928bc6d 100644
--- a/sys/contrib/openzfs/module/zfs/zcp.c
+++ b/sys/contrib/openzfs/module/zfs/zcp.c
@@ -1,1452 +1,1452 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2018 by Delphix. All rights reserved.
*/
/*
* ZFS Channel Programs (ZCP)
*
* The ZCP interface allows various ZFS administrative operations (e.g.
* creating and destroying snapshots, typically performed via an ioctl to
* /dev/zfs by the zfs(8) command and libzfs/libzfs_core) to be run
* programmatically as a Lua script. A ZCP
* script is run as a dsl_sync_task and fully executed during one transaction
* group sync. This ensures that no other changes can be written concurrently
* with a running Lua script. Combining multiple calls to the exposed ZFS
* functions into one script gives a number of benefits:
*
* 1. Atomicity. For some compound or iterative operations, it's useful to be
* able to guarantee that the state of a pool has not changed between calls to
* ZFS.
*
* 2. Performance. If a large number of changes need to be made (e.g. deleting
* many filesystems), there can be a significant performance penalty as a
* result of the need to wait for a transaction group sync to pass for every
* single operation. When expressed as a single ZCP script, all these changes
* can be performed at once in one txg sync.
*
* A modified version of the Lua 5.2 interpreter is used to run channel program
* scripts. The Lua 5.2 manual can be found at:
*
* http://www.lua.org/manual/5.2/
*
* If being run by a user (via an ioctl syscall), executing a ZCP script
* requires root privileges in the global zone.
*
* Scripts are passed to zcp_eval() as a string, then run in a synctask by
* zcp_eval_sync(). Arguments can be passed into the Lua script as an nvlist,
* which will be converted to a Lua table. Similarly, values returned from
* a ZCP script will be converted to an nvlist. See zcp_lua_to_nvlist_impl()
* for details on exact allowed types and conversion.
*
* ZFS functionality is exposed to a ZCP script as a library of function calls.
* These calls are sorted into submodules, such as zfs.list and zfs.sync, for
* iterators and synctasks, respectively. Each of these submodules resides in
* its own source file, with a zcp_*_info structure describing each library
* call in the submodule.
*
* Error handling in ZCP scripts is handled by a number of different methods
* based on severity:
*
* 1. Memory and time limits are in place to prevent a channel program from
* consuming excessive system resources or running forever. If one of these limits is
* hit, the channel program will be stopped immediately and return from
* zcp_eval() with an error code. No attempt will be made to roll back or undo
* any changes made by the channel program before the error occurred.
* Consumers invoking zcp_eval() from elsewhere in the kernel may pass a time
* limit of 0, disabling the time limit.
*
* 2. Internal Lua errors can occur as a result of a syntax error, calling a
* library function with incorrect arguments, invoking the error() function,
* failing an assert(), or other runtime errors. In these cases the channel
* program will stop executing and return from zcp_eval() with an error code.
* In place of a return value, an error message will also be returned in the
* 'result' nvlist containing information about the error. No attempt will be
* made to roll back or undo any changes made by the channel program before the
* error occurred.
*
* 3. If an error occurs inside a ZFS library call which returns an error code,
* the error is returned to the Lua script to be handled as desired.
*
* In the first two cases, Lua's error-throwing mechanism is used, which
* longjumps out of the script execution with luaL_error() and returns with the
* error.
*
* See zfs-program(8) for more information on high level usage.
*/
#include <sys/lua/lua.h>
#include <sys/lua/lualib.h>
#include <sys/lua/lauxlib.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dataset.h>
#include <sys/zcp.h>
#include <sys/zcp_iter.h>
#include <sys/zcp_prop.h>
#include <sys/zcp_global.h>
#include <sys/zvol.h>
#ifndef KM_NORMALPRI
#define KM_NORMALPRI 0
#endif
#define ZCP_NVLIST_MAX_DEPTH 20
uint64_t zfs_lua_check_instrlimit_interval = 100;
unsigned long zfs_lua_max_instrlimit = ZCP_MAX_INSTRLIMIT;
unsigned long zfs_lua_max_memlimit = ZCP_MAX_MEMLIMIT;
/*
* Forward declarations for mutually recursive functions
*/
static int zcp_nvpair_value_to_lua(lua_State *, nvpair_t *, char *, int);
static int zcp_lua_to_nvlist_impl(lua_State *, int, nvlist_t *, const char *,
int);
/*
* The outer-most error callback handler for use with lua_pcall(). On
* error Lua will call this callback with a single argument that
* represents the error value. In most cases this will be a string
* containing an error message, but channel programs can use Lua's
* error() function to return arbitrary objects as errors. This callback
* returns (on the Lua stack) the original error object along with a traceback.
*
* Fatal Lua errors can occur while resources are held, so we also call any
* registered cleanup function here.
*/
static int
zcp_error_handler(lua_State *state)
{
const char *msg;
zcp_cleanup(state);
VERIFY3U(1, ==, lua_gettop(state));
msg = lua_tostring(state, 1);
luaL_traceback(state, state, msg, 1);
return (1);
}
int
zcp_argerror(lua_State *state, int narg, const char *msg, ...)
{
va_list alist;
va_start(alist, msg);
const char *buf = lua_pushvfstring(state, msg, alist);
va_end(alist);
return (luaL_argerror(state, narg, buf));
}
/*
* Install a new cleanup function, which will be invoked with the given
* opaque argument if a fatal error causes the Lua interpreter to longjump out
* of a function call.
*
* If an error occurs, the cleanup function will be invoked exactly once and
* then unregistered.
*
* Returns the registered cleanup handler so the caller can deregister it
* if no error occurs.
*/
zcp_cleanup_handler_t *
zcp_register_cleanup(lua_State *state, zcp_cleanup_t cleanfunc, void *cleanarg)
{
zcp_run_info_t *ri = zcp_run_info(state);
zcp_cleanup_handler_t *zch = kmem_alloc(sizeof (*zch), KM_SLEEP);
zch->zch_cleanup_func = cleanfunc;
zch->zch_cleanup_arg = cleanarg;
list_insert_head(&ri->zri_cleanup_handlers, zch);
return (zch);
}
void
zcp_deregister_cleanup(lua_State *state, zcp_cleanup_handler_t *zch)
{
zcp_run_info_t *ri = zcp_run_info(state);
list_remove(&ri->zri_cleanup_handlers, zch);
kmem_free(zch, sizeof (*zch));
}
/*
* Execute the currently registered cleanup handlers then free them and
* destroy the handler list.
*/
void
zcp_cleanup(lua_State *state)
{
zcp_run_info_t *ri = zcp_run_info(state);
for (zcp_cleanup_handler_t *zch =
list_remove_head(&ri->zri_cleanup_handlers); zch != NULL;
zch = list_remove_head(&ri->zri_cleanup_handlers)) {
zch->zch_cleanup_func(zch->zch_cleanup_arg);
kmem_free(zch, sizeof (*zch));
}
}
/*
* Convert the lua table at the given index on the Lua stack to an nvlist
* and return it.
*
* If the table can not be converted for any reason, NULL is returned and
* an error message is pushed onto the Lua stack.
*/
static nvlist_t *
zcp_table_to_nvlist(lua_State *state, int index, int depth)
{
nvlist_t *nvl;
/*
* Converting a Lua table to an nvlist with key uniqueness checking is
* O(n^2) in the number of keys in the nvlist, which can take a long
* time when we return a large table from a channel program.
* Furthermore, Lua's table interface *almost* guarantees unique keys
* on its own (details below). Therefore, we don't use fnvlist_alloc()
* here to avoid the built-in uniqueness checking.
*
* The *almost* is because it's possible to have key collisions between
* e.g. the string "1" and the number 1, or the string "true" and the
* boolean true, so we explicitly check that when we're looking at a
* key which is an integer / boolean or a string that can be parsed as
* one of those types. In the worst case this could still devolve into
* O(n^2), so we only start doing these checks on boolean/integer keys
* once we've seen a string key which fits this weird usage pattern.
*
* Ultimately, we still want callers to know that the keys in this
* nvlist are unique, so before we return this we set the nvlist's
* flags to reflect that.
*/
VERIFY0(nvlist_alloc(&nvl, 0, KM_SLEEP));
/*
* Push an empty stack slot where lua_next() will store each
* table key.
*/
lua_pushnil(state);
boolean_t saw_str_could_collide = B_FALSE;
while (lua_next(state, index) != 0) {
/*
* The next key-value pair from the table at index is
* now on the stack, with the key at stack slot -2 and
* the value at slot -1.
*/
int err = 0;
char buf[32];
const char *key = NULL;
boolean_t key_could_collide = B_FALSE;
switch (lua_type(state, -2)) {
case LUA_TSTRING:
key = lua_tostring(state, -2);
/* check if this could collide with a number or bool */
long long tmp;
int parselen;
if ((sscanf(key, "%lld%n", &tmp, &parselen) > 0 &&
parselen == strlen(key)) ||
strcmp(key, "true") == 0 ||
strcmp(key, "false") == 0) {
key_could_collide = B_TRUE;
saw_str_could_collide = B_TRUE;
}
break;
case LUA_TBOOLEAN:
key = (lua_toboolean(state, -2) == B_TRUE ?
"true" : "false");
if (saw_str_could_collide) {
key_could_collide = B_TRUE;
}
break;
case LUA_TNUMBER:
VERIFY3U(sizeof (buf), >,
snprintf(buf, sizeof (buf), "%lld",
(longlong_t)lua_tonumber(state, -2)));
key = buf;
if (saw_str_could_collide) {
key_could_collide = B_TRUE;
}
break;
default:
fnvlist_free(nvl);
(void) lua_pushfstring(state, "Invalid key "
"type '%s' in table",
lua_typename(state, lua_type(state, -2)));
return (NULL);
}
/*
* Check for type-mismatched key collisions, and throw an error.
*/
if (key_could_collide && nvlist_exists(nvl, key)) {
fnvlist_free(nvl);
(void) lua_pushfstring(state, "Collision of "
"key '%s' in table", key);
return (NULL);
}
/*
* Recursively convert the table value and insert into
* the new nvlist with the parsed key. To prevent
* stack overflow on circular or heavily nested tables,
* we track the current nvlist depth.
*/
if (depth >= ZCP_NVLIST_MAX_DEPTH) {
fnvlist_free(nvl);
(void) lua_pushfstring(state, "Maximum table "
"depth (%d) exceeded for table",
ZCP_NVLIST_MAX_DEPTH);
return (NULL);
}
err = zcp_lua_to_nvlist_impl(state, -1, nvl, key,
depth + 1);
if (err != 0) {
fnvlist_free(nvl);
/*
* Error message has been pushed to the lua
* stack by the recursive call.
*/
return (NULL);
}
/*
* Pop the value pushed by lua_next().
*/
lua_pop(state, 1);
}
/*
* Mark the nvlist as having unique keys. This is a little ugly, but we
* ensured above that there are no duplicate keys in the nvlist.
*/
nvl->nvl_nvflag |= NV_UNIQUE_NAME;
return (nvl);
}
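/*
* For example (illustrative), returning the table
*
*     {["1"] = "a", [1] = "b"}
*
* from a channel program fails with "Collision of key '1' in table", since
* the string key "1" and the number key 1 both map to the nvlist key "1".
*/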
/*
* Convert a value from the given index into the lua stack to an nvpair, adding
* it to an nvlist with the given key.
*
* Values are converted as follows:
*
* string -> string
* number -> int64
* boolean -> boolean
* nil -> boolean (no value)
*
* Lua tables are converted to nvlists and then inserted. The table's keys
* are converted to strings then used as keys in the nvlist to store each table
* element. Keys are converted as follows:
*
* string -> no change
* number -> "%lld"
* boolean -> "true" | "false"
* nil -> error
*
* In the case of a key collision, an error is thrown.
*
* If an error is encountered, a nonzero error code is returned, and an error
* string will be pushed onto the Lua stack.
*/
static int
zcp_lua_to_nvlist_impl(lua_State *state, int index, nvlist_t *nvl,
const char *key, int depth)
{
/*
* Verify that we have enough remaining space in the lua stack to parse
* a key-value pair and push an error.
*/
if (!lua_checkstack(state, 3)) {
(void) lua_pushstring(state, "Lua stack overflow");
return (1);
}
index = lua_absindex(state, index);
switch (lua_type(state, index)) {
case LUA_TNIL:
fnvlist_add_boolean(nvl, key);
break;
case LUA_TBOOLEAN:
fnvlist_add_boolean_value(nvl, key,
lua_toboolean(state, index));
break;
case LUA_TNUMBER:
fnvlist_add_int64(nvl, key, lua_tonumber(state, index));
break;
case LUA_TSTRING:
fnvlist_add_string(nvl, key, lua_tostring(state, index));
break;
case LUA_TTABLE: {
nvlist_t *value_nvl = zcp_table_to_nvlist(state, index, depth);
if (value_nvl == NULL)
return (SET_ERROR(EINVAL));
fnvlist_add_nvlist(nvl, key, value_nvl);
fnvlist_free(value_nvl);
break;
}
default:
(void) lua_pushfstring(state,
"Invalid value type '%s' for key '%s'",
lua_typename(state, lua_type(state, index)), key);
return (SET_ERROR(EINVAL));
}
return (0);
}
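/*
* As an illustration of the conversions above, the Lua table
*
*     {foo = 12, [3] = true, bar = {"a", "b"}}
*
* becomes an nvlist of roughly the form
*
*     foo -> int64 12
*     3   -> boolean_value B_TRUE
*     bar -> nvlist { "1" -> "a", "2" -> "b" }
*/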
/*
* Convert a lua value to an nvpair, adding it to an nvlist with the given key.
*/
static void
zcp_lua_to_nvlist(lua_State *state, int index, nvlist_t *nvl, const char *key)
{
/*
* On error, zcp_lua_to_nvlist_impl pushes an error string onto the Lua
* stack before returning with a nonzero error code. If an error is
* returned, throw a fatal lua error with the given string.
*/
if (zcp_lua_to_nvlist_impl(state, index, nvl, key, 0) != 0)
(void) lua_error(state);
}
static int
zcp_lua_to_nvlist_helper(lua_State *state)
{
nvlist_t *nv = (nvlist_t *)lua_touserdata(state, 2);
const char *key = (const char *)lua_touserdata(state, 1);
zcp_lua_to_nvlist(state, 3, nv, key);
return (0);
}
static void
zcp_convert_return_values(lua_State *state, nvlist_t *nvl,
const char *key, int *result)
{
int err;
VERIFY3U(1, ==, lua_gettop(state));
lua_pushcfunction(state, zcp_lua_to_nvlist_helper);
lua_pushlightuserdata(state, (char *)key);
lua_pushlightuserdata(state, nvl);
lua_pushvalue(state, 1);
lua_remove(state, 1);
err = lua_pcall(state, 3, 0, 0); /* zcp_lua_to_nvlist_helper */
if (err != 0) {
zcp_lua_to_nvlist(state, 1, nvl, ZCP_RET_ERROR);
*result = SET_ERROR(ECHRNG);
}
}
/*
* Push a Lua table representing nvl onto the stack. If it can't be
* converted, return EINVAL, fill in errbuf, and push nothing. errbuf may
* be specified as NULL, in which case no error string will be output.
*
* Most nvlists are converted as simple key->value Lua tables, but we make
* an exception for the case where all nvlist entries are BOOLEANs (a string
* key without a value). In Lua, a table key pointing to a value of Nil
* (no value) is equivalent to the key not existing, so a BOOLEAN nvlist
* entry can't be directly converted to a Lua table entry. Nvlists of entirely
* BOOLEAN entries are frequently used to pass around lists of datasets, so for
* convenience we check for this case, and convert it to a simple Lua array of
* strings.
*/
int
zcp_nvlist_to_lua(lua_State *state, nvlist_t *nvl,
char *errbuf, int errbuf_len)
{
nvpair_t *pair;
lua_newtable(state);
boolean_t has_values = B_FALSE;
/*
* If the list doesn't have any values, just convert it to a string
* array.
*/
for (pair = nvlist_next_nvpair(nvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
if (nvpair_type(pair) != DATA_TYPE_BOOLEAN) {
has_values = B_TRUE;
break;
}
}
if (!has_values) {
int i = 1;
for (pair = nvlist_next_nvpair(nvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
(void) lua_pushinteger(state, i);
(void) lua_pushstring(state, nvpair_name(pair));
(void) lua_settable(state, -3);
i++;
}
} else {
for (pair = nvlist_next_nvpair(nvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
int err = zcp_nvpair_value_to_lua(state, pair,
errbuf, errbuf_len);
if (err != 0) {
lua_pop(state, 1);
return (err);
}
(void) lua_setfield(state, -2, nvpair_name(pair));
}
}
return (0);
}
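/*
* For example (illustrative), an nvlist consisting solely of the BOOLEAN
* entries "rpool/a@snap" and "rpool/b@snap" is pushed as the Lua array
*
*     {"rpool/a@snap", "rpool/b@snap"}
*
* whereas an nvlist with at least one valued entry is pushed as a
* key -> value table.
*/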
/*
* Push a Lua object representing the value of "pair" onto the stack.
*
* Only understands boolean_value, string, int64, nvlist,
* string_array, and int64_array type values. For other
* types, returns EINVAL, fills in errbuf, and pushes nothing.
*/
static int
zcp_nvpair_value_to_lua(lua_State *state, nvpair_t *pair,
char *errbuf, int errbuf_len)
{
int err = 0;
if (pair == NULL) {
lua_pushnil(state);
return (0);
}
switch (nvpair_type(pair)) {
case DATA_TYPE_BOOLEAN_VALUE:
(void) lua_pushboolean(state,
fnvpair_value_boolean_value(pair));
break;
case DATA_TYPE_STRING:
(void) lua_pushstring(state, fnvpair_value_string(pair));
break;
case DATA_TYPE_INT64:
(void) lua_pushinteger(state, fnvpair_value_int64(pair));
break;
case DATA_TYPE_NVLIST:
err = zcp_nvlist_to_lua(state,
fnvpair_value_nvlist(pair), errbuf, errbuf_len);
break;
case DATA_TYPE_STRING_ARRAY: {
char **strarr;
uint_t nelem;
(void) nvpair_value_string_array(pair, &strarr, &nelem);
lua_newtable(state);
for (int i = 0; i < nelem; i++) {
(void) lua_pushinteger(state, i + 1);
(void) lua_pushstring(state, strarr[i]);
(void) lua_settable(state, -3);
}
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *intarr;
uint_t nelem;
(void) nvpair_value_uint64_array(pair, &intarr, &nelem);
lua_newtable(state);
for (int i = 0; i < nelem; i++) {
(void) lua_pushinteger(state, i + 1);
(void) lua_pushinteger(state, intarr[i]);
(void) lua_settable(state, -3);
}
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *intarr;
uint_t nelem;
(void) nvpair_value_int64_array(pair, &intarr, &nelem);
lua_newtable(state);
for (int i = 0; i < nelem; i++) {
(void) lua_pushinteger(state, i + 1);
(void) lua_pushinteger(state, intarr[i]);
(void) lua_settable(state, -3);
}
break;
}
default: {
if (errbuf != NULL) {
(void) snprintf(errbuf, errbuf_len,
"Unhandled nvpair type %d for key '%s'",
nvpair_type(pair), nvpair_name(pair));
}
return (SET_ERROR(EINVAL));
}
}
return (err);
}
int
zcp_dataset_hold_error(lua_State *state, dsl_pool_t *dp, const char *dsname,
int error)
{
if (error == ENOENT) {
(void) zcp_argerror(state, 1, "no such dataset '%s'", dsname);
return (0); /* not reached; zcp_argerror will longjmp */
} else if (error == EXDEV) {
(void) zcp_argerror(state, 1,
"dataset '%s' is not in the target pool '%s'",
dsname, spa_name(dp->dp_spa));
return (0); /* not reached; zcp_argerror will longjmp */
} else if (error == EIO) {
(void) luaL_error(state,
"I/O error while accessing dataset '%s'", dsname);
return (0); /* not reached; luaL_error will longjmp */
} else if (error != 0) {
(void) luaL_error(state,
"unexpected error %d while accessing dataset '%s'",
error, dsname);
return (0); /* not reached; luaL_error will longjmp */
}
return (0);
}
/*
* Note: will longjmp (via lua_error()) on error.
* Assumes that the dsname is argument #1 (for error reporting purposes).
*/
dsl_dataset_t *
zcp_dataset_hold(lua_State *state, dsl_pool_t *dp, const char *dsname,
void *tag)
{
dsl_dataset_t *ds;
int error = dsl_dataset_hold(dp, dsname, tag, &ds);
(void) zcp_dataset_hold_error(state, dp, dsname, error);
return (ds);
}
static int zcp_debug(lua_State *);
static zcp_lib_info_t zcp_debug_info = {
.name = "debug",
.func = zcp_debug,
.pargs = {
{ .za_name = "debug string", .za_lua_type = LUA_TSTRING},
{NULL, 0}
},
.kwargs = {
{NULL, 0}
}
};
static int
zcp_debug(lua_State *state)
{
const char *dbgstring;
zcp_run_info_t *ri = zcp_run_info(state);
zcp_lib_info_t *libinfo = &zcp_debug_info;
zcp_parse_args(state, libinfo->name, libinfo->pargs, libinfo->kwargs);
dbgstring = lua_tostring(state, 1);
zfs_dbgmsg("txg %lld ZCP: %s", (longlong_t)ri->zri_tx->tx_txg,
dbgstring);
return (0);
}
static int zcp_exists(lua_State *);
static zcp_lib_info_t zcp_exists_info = {
.name = "exists",
.func = zcp_exists,
.pargs = {
{ .za_name = "dataset", .za_lua_type = LUA_TSTRING},
{NULL, 0}
},
.kwargs = {
{NULL, 0}
}
};
static int
zcp_exists(lua_State *state)
{
zcp_run_info_t *ri = zcp_run_info(state);
dsl_pool_t *dp = ri->zri_pool;
zcp_lib_info_t *libinfo = &zcp_exists_info;
zcp_parse_args(state, libinfo->name, libinfo->pargs, libinfo->kwargs);
const char *dsname = lua_tostring(state, 1);
dsl_dataset_t *ds;
int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == 0) {
dsl_dataset_rele(ds, FTAG);
lua_pushboolean(state, B_TRUE);
} else if (error == ENOENT) {
lua_pushboolean(state, B_FALSE);
} else if (error == EXDEV) {
return (luaL_error(state, "dataset '%s' is not in the "
"target pool", dsname));
} else if (error == EIO) {
return (luaL_error(state, "I/O error opening dataset '%s'",
dsname));
} else if (error != 0) {
return (luaL_error(state, "unexpected error %d", error));
}
return (1);
}
/*
* Allocate/realloc/free a buffer for the lua interpreter.
*
* When nsize is 0, behaves as free() and returns NULL.
*
* If ptr is NULL, behaves as malloc() and returns an allocated buffer of size
* at least nsize.
*
* Otherwise, behaves as realloc(), changing the allocation from osize to nsize.
* Shrinking the buffer size never fails.
*
* The original allocated buffer size is stored as an int64 at the beginning of
* the buffer to avoid actually reallocating when shrinking a buffer, since lua
* requires that this operation never fail.
*/
static void *
zcp_lua_alloc(void *ud, void *ptr, size_t osize, size_t nsize)
{
zcp_alloc_arg_t *allocargs = ud;
if (nsize == 0) {
if (ptr != NULL) {
int64_t *allocbuf = (int64_t *)ptr - 1;
int64_t allocsize = *allocbuf;
ASSERT3S(allocsize, >, 0);
ASSERT3S(allocargs->aa_alloc_remaining + allocsize, <=,
allocargs->aa_alloc_limit);
allocargs->aa_alloc_remaining += allocsize;
vmem_free(allocbuf, allocsize);
}
return (NULL);
} else if (ptr == NULL) {
int64_t *allocbuf;
int64_t allocsize = nsize + sizeof (int64_t);
if (!allocargs->aa_must_succeed &&
(allocsize <= 0 ||
allocsize > allocargs->aa_alloc_remaining)) {
return (NULL);
}
allocbuf = vmem_alloc(allocsize, KM_SLEEP);
allocargs->aa_alloc_remaining -= allocsize;
*allocbuf = allocsize;
return (allocbuf + 1);
} else if (nsize <= osize) {
/*
* If shrinking the buffer, lua requires that the reallocation
* never fail.
*/
return (ptr);
} else {
ASSERT3U(nsize, >, osize);
uint64_t *luabuf = zcp_lua_alloc(ud, NULL, 0, nsize);
if (luabuf == NULL) {
return (NULL);
}
(void) memcpy(luabuf, ptr, osize);
VERIFY3P(zcp_lua_alloc(ud, ptr, osize, 0), ==, NULL);
return (luabuf);
}
}
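/*
* Layout of a buffer handed to Lua by zcp_lua_alloc() (a sketch; the header
* is one int64_t, the remainder is whatever size Lua requested):
*
*     allocbuf               ptr returned to Lua
*     |                      |
*     v                      v
*     +--------------------+---------------------------+
*     | int64_t alloc size | nsize bytes for Lua       |
*     +--------------------+---------------------------+
*
* The size header lets the free path recover the true vmem allocation size
* even after a shrinking realloc that did not actually move the buffer.
*/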
-/* ARGSUSED */
static void
zcp_lua_counthook(lua_State *state, lua_Debug *ar)
{
+ (void) ar;
lua_getfield(state, LUA_REGISTRYINDEX, ZCP_RUN_INFO_KEY);
zcp_run_info_t *ri = lua_touserdata(state, -1);
/*
* Check if we were canceled while waiting for the
* txg to sync or from our open context thread
*/
if (ri->zri_canceled ||
(!ri->zri_sync && issig(JUSTLOOKING) && issig(FORREAL))) {
ri->zri_canceled = B_TRUE;
(void) lua_pushstring(state, "Channel program was canceled.");
(void) lua_error(state);
/* Unreachable */
}
/*
* Check how many instructions the channel program has
* executed so far, and compare against the limit.
*/
ri->zri_curinstrs += zfs_lua_check_instrlimit_interval;
if (ri->zri_maxinstrs != 0 && ri->zri_curinstrs > ri->zri_maxinstrs) {
ri->zri_timed_out = B_TRUE;
(void) lua_pushstring(state,
"Channel program timed out.");
(void) lua_error(state);
/* Unreachable */
}
}
static int
zcp_panic_cb(lua_State *state)
{
panic("unprotected error in call to Lua API (%s)\n",
lua_tostring(state, -1));
return (0);
}
static void
zcp_eval_impl(dmu_tx_t *tx, zcp_run_info_t *ri)
{
int err;
lua_State *state = ri->zri_state;
VERIFY3U(3, ==, lua_gettop(state));
/* finish initializing our runtime state */
ri->zri_pool = dmu_tx_pool(tx);
ri->zri_tx = tx;
list_create(&ri->zri_cleanup_handlers, sizeof (zcp_cleanup_handler_t),
offsetof(zcp_cleanup_handler_t, zch_node));
/*
* Store the zcp_run_info_t struct for this run in the Lua registry.
* Registry entries are not directly accessible by the Lua scripts but
* can be accessed by our callbacks.
*/
lua_pushlightuserdata(state, ri);
lua_setfield(state, LUA_REGISTRYINDEX, ZCP_RUN_INFO_KEY);
VERIFY3U(3, ==, lua_gettop(state));
/*
* Tell the Lua interpreter to call our handler every count
* instructions. Channel programs that execute too many instructions
* should die with ETIME.
*/
(void) lua_sethook(state, zcp_lua_counthook, LUA_MASKCOUNT,
zfs_lua_check_instrlimit_interval);
/*
* Tell the Lua memory allocator to stop using KM_SLEEP before handing
* off control to the channel program. Channel programs that use too
* much memory should die with ENOSPC.
*/
ri->zri_allocargs->aa_must_succeed = B_FALSE;
/*
* Call the Lua function that open-context passed us. This pops the
* function and its input from the stack and pushes any return
* or error values.
*/
err = lua_pcall(state, 1, LUA_MULTRET, 1);
/*
* Let Lua use KM_SLEEP while we interpret the return values.
*/
ri->zri_allocargs->aa_must_succeed = B_TRUE;
/*
* Remove the error handler callback from the stack. At this point,
* there shouldn't be any cleanup handler registered in the handler
* list (zri_cleanup_handlers), regardless of whether it ran or not.
*/
list_destroy(&ri->zri_cleanup_handlers);
lua_remove(state, 1);
switch (err) {
case LUA_OK: {
/*
* Lua supports returning multiple values in a single return
* statement. Return values will have been pushed onto the
* stack:
* 1: Return value 1
* 2: Return value 2
* 3: etc...
* To simplify the process of retrieving a return value from a
* channel program, we disallow returning more than one value
* to ZFS from the Lua script, yielding a singleton return
* nvlist of the form { "return": Return value 1 }.
*/
int return_count = lua_gettop(state);
if (return_count == 1) {
ri->zri_result = 0;
zcp_convert_return_values(state, ri->zri_outnvl,
ZCP_RET_RETURN, &ri->zri_result);
} else if (return_count > 1) {
ri->zri_result = SET_ERROR(ECHRNG);
lua_settop(state, 0);
(void) lua_pushfstring(state, "Multiple return "
"values not supported");
zcp_convert_return_values(state, ri->zri_outnvl,
ZCP_RET_ERROR, &ri->zri_result);
}
break;
}
case LUA_ERRRUN:
case LUA_ERRGCMM: {
/*
* The channel program encountered a fatal error within the
* script, such as failing an assertion, or calling a function
* with incompatible arguments. The error value and the
* traceback generated by zcp_error_handler() should be on the
* stack.
*/
VERIFY3U(1, ==, lua_gettop(state));
if (ri->zri_timed_out) {
ri->zri_result = SET_ERROR(ETIME);
} else if (ri->zri_canceled) {
ri->zri_result = SET_ERROR(EINTR);
} else {
ri->zri_result = SET_ERROR(ECHRNG);
}
zcp_convert_return_values(state, ri->zri_outnvl,
ZCP_RET_ERROR, &ri->zri_result);
if (ri->zri_result == ETIME && ri->zri_outnvl != NULL) {
(void) nvlist_add_uint64(ri->zri_outnvl,
ZCP_ARG_INSTRLIMIT, ri->zri_curinstrs);
}
break;
}
case LUA_ERRERR: {
/*
* The channel program encountered a fatal error within the
* script, and we encountered another error while trying to
* compute the traceback in zcp_error_handler(). We can only
* return the error message.
*/
VERIFY3U(1, ==, lua_gettop(state));
if (ri->zri_timed_out) {
ri->zri_result = SET_ERROR(ETIME);
} else if (ri->zri_canceled) {
ri->zri_result = SET_ERROR(EINTR);
} else {
ri->zri_result = SET_ERROR(ECHRNG);
}
zcp_convert_return_values(state, ri->zri_outnvl,
ZCP_RET_ERROR, &ri->zri_result);
break;
}
case LUA_ERRMEM:
/*
* Lua ran out of memory while running the channel program.
* There's not much we can do.
*/
ri->zri_result = SET_ERROR(ENOSPC);
break;
default:
VERIFY0(err);
}
}
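/*
* For example (illustrative), a channel program ending in
*
*     return {"snap1", "snap2"}
*
* produces an output nvlist of roughly the form
*
*     return -> { "1" -> "snap1", "2" -> "snap2" }
*
* while "return 1, 2" fails with ECHRNG and the error string "Multiple
* return values not supported".
*/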
static void
zcp_pool_error(zcp_run_info_t *ri, const char *poolname)
{
ri->zri_result = SET_ERROR(ECHRNG);
lua_settop(ri->zri_state, 0);
(void) lua_pushfstring(ri->zri_state, "Could not open pool: %s",
poolname);
zcp_convert_return_values(ri->zri_state, ri->zri_outnvl,
ZCP_RET_ERROR, &ri->zri_result);
}
/*
* This callback is invoked when txg_wait_synced_sig() encounters a signal.
* txg_wait_synced_sig() will continue to wait for the txg to complete
* after calling this callback.
*/
-/* ARGSUSED */
static void
zcp_eval_sig(void *arg, dmu_tx_t *tx)
{
+ (void) tx;
zcp_run_info_t *ri = arg;
ri->zri_canceled = B_TRUE;
}
static void
zcp_eval_sync(void *arg, dmu_tx_t *tx)
{
zcp_run_info_t *ri = arg;
/*
* Open context should have setup the stack to contain:
* 1: Error handler callback
* 2: Script to run (converted to a Lua function)
* 3: nvlist input to function (converted to Lua table or nil)
*/
VERIFY3U(3, ==, lua_gettop(ri->zri_state));
zcp_eval_impl(tx, ri);
}
static void
zcp_eval_open(zcp_run_info_t *ri, const char *poolname)
{
int error;
dsl_pool_t *dp;
dmu_tx_t *tx;
/*
* See comment from the same assertion in zcp_eval_sync().
*/
VERIFY3U(3, ==, lua_gettop(ri->zri_state));
error = dsl_pool_hold(poolname, FTAG, &dp);
if (error != 0) {
zcp_pool_error(ri, poolname);
return;
}
/*
* As we are running in open-context, we have no transaction associated
* with the channel program. At the same time, functions from the
* zfs.check submodule need to be associated with a transaction as
* they are basically dry-runs of their counterparts in the zfs.sync
* submodule. These functions should be able to run in open-context.
* Therefore we create a new transaction that we later abort once
* the channel program has been evaluated.
*/
tx = dmu_tx_create_dd(dp->dp_mos_dir);
zcp_eval_impl(tx, ri);
dmu_tx_abort(tx);
dsl_pool_rele(dp, FTAG);
}
int
zcp_eval(const char *poolname, const char *program, boolean_t sync,
uint64_t instrlimit, uint64_t memlimit, nvpair_t *nvarg, nvlist_t *outnvl)
{
int err;
lua_State *state;
zcp_run_info_t runinfo;
if (instrlimit > zfs_lua_max_instrlimit)
return (SET_ERROR(EINVAL));
if (memlimit == 0 || memlimit > zfs_lua_max_memlimit)
return (SET_ERROR(EINVAL));
zcp_alloc_arg_t allocargs = {
.aa_must_succeed = B_TRUE,
.aa_alloc_remaining = (int64_t)memlimit,
.aa_alloc_limit = (int64_t)memlimit,
};
/*
* Creates a Lua state with a memory allocator that uses KM_SLEEP.
* This should never fail.
*/
state = lua_newstate(zcp_lua_alloc, &allocargs);
VERIFY(state != NULL);
(void) lua_atpanic(state, zcp_panic_cb);
/*
* Load core Lua libraries we want access to.
*/
VERIFY3U(1, ==, luaopen_base(state));
lua_pop(state, 1);
VERIFY3U(1, ==, luaopen_coroutine(state));
lua_setglobal(state, LUA_COLIBNAME);
VERIFY0(lua_gettop(state));
VERIFY3U(1, ==, luaopen_string(state));
lua_setglobal(state, LUA_STRLIBNAME);
VERIFY0(lua_gettop(state));
VERIFY3U(1, ==, luaopen_table(state));
lua_setglobal(state, LUA_TABLIBNAME);
VERIFY0(lua_gettop(state));
/*
* Load globally visible variables such as errno aliases.
*/
zcp_load_globals(state);
VERIFY0(lua_gettop(state));
/*
* Load ZFS-specific modules.
*/
lua_newtable(state);
VERIFY3U(1, ==, zcp_load_list_lib(state));
lua_setfield(state, -2, "list");
VERIFY3U(1, ==, zcp_load_synctask_lib(state, B_FALSE));
lua_setfield(state, -2, "check");
VERIFY3U(1, ==, zcp_load_synctask_lib(state, B_TRUE));
lua_setfield(state, -2, "sync");
VERIFY3U(1, ==, zcp_load_get_lib(state));
lua_pushcclosure(state, zcp_debug_info.func, 0);
lua_setfield(state, -2, zcp_debug_info.name);
lua_pushcclosure(state, zcp_exists_info.func, 0);
lua_setfield(state, -2, zcp_exists_info.name);
lua_setglobal(state, "zfs");
VERIFY0(lua_gettop(state));
/*
* Push the error-callback that calculates Lua stack traces on
* unexpected failures.
*/
lua_pushcfunction(state, zcp_error_handler);
VERIFY3U(1, ==, lua_gettop(state));
/*
* Load the actual script as a function onto the stack as text ("t").
* The only valid error condition is a syntax error in the script.
* ERRMEM should not be possible because our allocator is using
* KM_SLEEP. ERRGCMM should not be possible because we have not added
* any objects with __gc metamethods to the interpreter that could
* fail.
*/
err = luaL_loadbufferx(state, program, strlen(program),
"channel program", "t");
if (err == LUA_ERRSYNTAX) {
fnvlist_add_string(outnvl, ZCP_RET_ERROR,
lua_tostring(state, -1));
lua_close(state);
return (SET_ERROR(EINVAL));
}
VERIFY0(err);
VERIFY3U(2, ==, lua_gettop(state));
/*
* Convert the input nvlist to a Lua object and put it on top of the
* stack.
*/
char errmsg[128];
err = zcp_nvpair_value_to_lua(state, nvarg,
errmsg, sizeof (errmsg));
if (err != 0) {
fnvlist_add_string(outnvl, ZCP_RET_ERROR, errmsg);
lua_close(state);
return (SET_ERROR(EINVAL));
}
VERIFY3U(3, ==, lua_gettop(state));
runinfo.zri_state = state;
runinfo.zri_allocargs = &allocargs;
runinfo.zri_outnvl = outnvl;
runinfo.zri_result = 0;
runinfo.zri_cred = CRED();
runinfo.zri_proc = curproc;
runinfo.zri_timed_out = B_FALSE;
runinfo.zri_canceled = B_FALSE;
runinfo.zri_sync = sync;
runinfo.zri_space_used = 0;
runinfo.zri_curinstrs = 0;
runinfo.zri_maxinstrs = instrlimit;
runinfo.zri_new_zvols = fnvlist_alloc();
if (sync) {
err = dsl_sync_task_sig(poolname, NULL, zcp_eval_sync,
zcp_eval_sig, &runinfo, 0, ZFS_SPACE_CHECK_ZCP_EVAL);
if (err != 0)
zcp_pool_error(&runinfo, poolname);
} else {
zcp_eval_open(&runinfo, poolname);
}
lua_close(state);
/*
* Create device minor nodes for any new zvols.
*/
for (nvpair_t *pair = nvlist_next_nvpair(runinfo.zri_new_zvols, NULL);
pair != NULL;
pair = nvlist_next_nvpair(runinfo.zri_new_zvols, pair)) {
zvol_create_minor(nvpair_name(pair));
}
fnvlist_free(runinfo.zri_new_zvols);
return (runinfo.zri_result);
}
/*
* Retrieve metadata about the currently running channel program.
*/
zcp_run_info_t *
zcp_run_info(lua_State *state)
{
zcp_run_info_t *ri;
lua_getfield(state, LUA_REGISTRYINDEX, ZCP_RUN_INFO_KEY);
ri = lua_touserdata(state, -1);
lua_pop(state, 1);
return (ri);
}
/*
* Argument Parsing
* ================
*
* The Lua language allows methods to be called with any number
* of arguments of any type. When calling back into ZFS we need to sanitize
* arguments from channel programs to make sure unexpected arguments or
* arguments of the wrong type result in clear error messages. To do this
* in a uniform way all callbacks from channel programs should use the
* zcp_parse_args() function to interpret inputs.
*
* Positional vs Keyword Arguments
* ===============================
*
* Every callback function takes a fixed set of required positional arguments
* and optional keyword arguments. For example, the destroy function takes
* a single positional string argument (the name of the dataset to destroy)
* and an optional "defer" keyword boolean argument. When calling lua functions
* with parentheses, only positional arguments can be used:
*
* zfs.sync.snapshot("rpool@snap")
*
* To use keyword arguments functions should be called with a single argument
* that is a lua table containing mappings of integer -> positional arguments
* and string -> keyword arguments:
*
* zfs.sync.snapshot({1="rpool@snap", defer=true})
*
* The lua language allows curly braces to be used in place of parenthesis as
* syntactic sugar for this calling convention:
*
* zfs.sync.snapshot{"rpool@snap", defer=true}
*/
/*
* Throw an error whose message includes the function's expected positional
* and keyword arguments. If that argument summary would not fit in the
* output buffer, only the formatted error string is output.
*/
static void
zcp_args_error(lua_State *state, const char *fname, const zcp_arg_t *pargs,
const zcp_arg_t *kwargs, const char *fmt, ...)
{
int i;
char errmsg[512];
size_t len = sizeof (errmsg);
size_t msglen = 0;
va_list argp;
va_start(argp, fmt);
VERIFY3U(len, >, vsnprintf(errmsg, len, fmt, argp));
va_end(argp);
/*
* Calculate the total length of the final string, including extra
* formatting characters. If the argument dump would be too large,
* only print the error string.
*/
msglen = strlen(errmsg);
msglen += strlen(fname) + 4; /* : + {} + null terminator */
for (i = 0; pargs[i].za_name != NULL; i++) {
msglen += strlen(pargs[i].za_name);
msglen += strlen(lua_typename(state, pargs[i].za_lua_type));
if (pargs[i + 1].za_name != NULL || kwargs[0].za_name != NULL)
msglen += 5; /* < + ( + )> + , */
else
msglen += 4; /* < + ( + )> */
}
for (i = 0; kwargs[i].za_name != NULL; i++) {
msglen += strlen(kwargs[i].za_name);
msglen += strlen(lua_typename(state, kwargs[i].za_lua_type));
if (kwargs[i + 1].za_name != NULL)
msglen += 4; /* =( + ) + , */
else
msglen += 3; /* =( + ) */
}
if (msglen >= len)
(void) luaL_error(state, errmsg);
VERIFY3U(len, >, strlcat(errmsg, ": ", len));
VERIFY3U(len, >, strlcat(errmsg, fname, len));
VERIFY3U(len, >, strlcat(errmsg, "{", len));
for (i = 0; pargs[i].za_name != NULL; i++) {
VERIFY3U(len, >, strlcat(errmsg, "<", len));
VERIFY3U(len, >, strlcat(errmsg, pargs[i].za_name, len));
VERIFY3U(len, >, strlcat(errmsg, "(", len));
VERIFY3U(len, >, strlcat(errmsg,
lua_typename(state, pargs[i].za_lua_type), len));
VERIFY3U(len, >, strlcat(errmsg, ")>", len));
if (pargs[i + 1].za_name != NULL || kwargs[0].za_name != NULL) {
VERIFY3U(len, >, strlcat(errmsg, ", ", len));
}
}
for (i = 0; kwargs[i].za_name != NULL; i++) {
VERIFY3U(len, >, strlcat(errmsg, kwargs[i].za_name, len));
VERIFY3U(len, >, strlcat(errmsg, "=(", len));
VERIFY3U(len, >, strlcat(errmsg,
lua_typename(state, kwargs[i].za_lua_type), len));
VERIFY3U(len, >, strlcat(errmsg, ")", len));
if (kwargs[i + 1].za_name != NULL) {
VERIFY3U(len, >, strlcat(errmsg, ", ", len));
}
}
VERIFY3U(len, >, strlcat(errmsg, "}", len));
(void) luaL_error(state, errmsg);
panic("unreachable code");
}
static void
zcp_parse_table_args(lua_State *state, const char *fname,
const zcp_arg_t *pargs, const zcp_arg_t *kwargs)
{
int i;
int type;
for (i = 0; pargs[i].za_name != NULL; i++) {
/*
* Check the table for this positional argument, leaving it
* on the top of the stack once we finish validating it.
*/
lua_pushinteger(state, i + 1);
lua_gettable(state, 1);
type = lua_type(state, -1);
if (type == LUA_TNIL) {
zcp_args_error(state, fname, pargs, kwargs,
"too few arguments");
panic("unreachable code");
} else if (type != pargs[i].za_lua_type) {
zcp_args_error(state, fname, pargs, kwargs,
"arg %d wrong type (is '%s', expected '%s')",
i + 1, lua_typename(state, type),
lua_typename(state, pargs[i].za_lua_type));
panic("unreachable code");
}
/*
* Remove the positional argument from the table.
*/
lua_pushinteger(state, i + 1);
lua_pushnil(state);
lua_settable(state, 1);
}
for (i = 0; kwargs[i].za_name != NULL; i++) {
/*
* Check the table for this keyword argument, which may be
* nil if it was omitted. Leave the value on the top of
* the stack after validating it.
*/
lua_getfield(state, 1, kwargs[i].za_name);
type = lua_type(state, -1);
if (type != LUA_TNIL && type != kwargs[i].za_lua_type) {
zcp_args_error(state, fname, pargs, kwargs,
"kwarg '%s' wrong type (is '%s', expected '%s')",
kwargs[i].za_name, lua_typename(state, type),
lua_typename(state, kwargs[i].za_lua_type));
panic("unreachable code");
}
/*
* Remove the keyword argument from the table.
*/
lua_pushnil(state);
lua_setfield(state, 1, kwargs[i].za_name);
}
/*
* Any entries remaining in the table are invalid inputs, print
* an error message based on what the entry is.
*/
lua_pushnil(state);
if (lua_next(state, 1)) {
if (lua_isnumber(state, -2) && lua_tointeger(state, -2) > 0) {
zcp_args_error(state, fname, pargs, kwargs,
"too many positional arguments");
} else if (lua_isstring(state, -2)) {
zcp_args_error(state, fname, pargs, kwargs,
"invalid kwarg '%s'", lua_tostring(state, -2));
} else {
zcp_args_error(state, fname, pargs, kwargs,
"kwarg keys must be strings");
}
panic("unreachable code");
}
lua_remove(state, 1);
}
static void
zcp_parse_pos_args(lua_State *state, const char *fname, const zcp_arg_t *pargs,
const zcp_arg_t *kwargs)
{
int i;
int type;
for (i = 0; pargs[i].za_name != NULL; i++) {
type = lua_type(state, i + 1);
if (type == LUA_TNONE) {
zcp_args_error(state, fname, pargs, kwargs,
"too few arguments");
panic("unreachable code");
} else if (type != pargs[i].za_lua_type) {
zcp_args_error(state, fname, pargs, kwargs,
"arg %d wrong type (is '%s', expected '%s')",
i + 1, lua_typename(state, type),
lua_typename(state, pargs[i].za_lua_type));
panic("unreachable code");
}
}
if (lua_gettop(state) != i) {
zcp_args_error(state, fname, pargs, kwargs,
"too many positional arguments");
panic("unreachable code");
}
for (i = 0; kwargs[i].za_name != NULL; i++) {
lua_pushnil(state);
}
}
/*
* Checks the current Lua stack against an expected set of positional and
* keyword arguments. If the stack does not match the expected arguments
* aborts the current channel program with a useful error message, otherwise
* it re-arranges the stack so that it contains the positional arguments
* followed by the keyword argument values in declaration order. Any missing
* keyword argument will be represented by a nil value on the stack.
*
* If the stack contains exactly one argument of type LUA_TTABLE the curly
* braces calling convention is assumed, otherwise the stack is parsed for
* positional arguments only.
*
* This function should be used by every function callback. It should be called
* before the callback manipulates the Lua stack as it assumes the stack
* represents the function arguments.
*/
void
zcp_parse_args(lua_State *state, const char *fname, const zcp_arg_t *pargs,
const zcp_arg_t *kwargs)
{
if (lua_gettop(state) == 1 && lua_istable(state, 1)) {
zcp_parse_table_args(state, fname, pargs, kwargs);
} else {
zcp_parse_pos_args(state, fname, pargs, kwargs);
}
}
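/*
* For example (illustrative), after zcp_parse_args() processes
*
*     zfs.sync.destroy{"rpool/fs@snap", defer=true}
*
* the Lua stack contains:
*
*     1: "rpool/fs@snap"   (positional "filesystem | snapshot")
*     2: true              (keyword "defer"; nil if omitted)
*/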
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_lua, zfs_lua_, max_instrlimit, ULONG, ZMOD_RW,
"Max instruction limit that can be specified for a channel program");
ZFS_MODULE_PARAM(zfs_lua, zfs_lua_, max_memlimit, ULONG, ZMOD_RW,
"Max memory limit that can be specified for a channel program");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zcp_synctask.c b/sys/contrib/openzfs/module/zfs/zcp_synctask.c
index c6ade59b9ced..bfcdbcf9c27f 100644
--- a/sys/contrib/openzfs/module/zfs/zcp_synctask.c
+++ b/sys/contrib/openzfs/module/zfs/zcp_synctask.c
@@ -1,549 +1,551 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2016, 2017 by Delphix. All rights reserved.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
* Copyright 2020 Joyent, Inc.
*/
#include <sys/lua/lua.h>
#include <sys/lua/lauxlib.h>
#include <sys/zcp.h>
#include <sys/zcp_set.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfeature.h>
#include <sys/metaslab.h>
#define DST_AVG_BLKSHIFT 14
typedef struct zcp_inherit_prop_arg {
lua_State *zipa_state;
const char *zipa_prop;
dsl_props_set_arg_t zipa_dpsa;
} zcp_inherit_prop_arg_t;
typedef int (zcp_synctask_func_t)(lua_State *, boolean_t, nvlist_t *);
typedef struct zcp_synctask_info {
const char *name;
zcp_synctask_func_t *func;
const zcp_arg_t pargs[4];
const zcp_arg_t kwargs[2];
zfs_space_check_t space_check;
int blocks_modified;
} zcp_synctask_info_t;
static void
zcp_synctask_cleanup(void *arg)
{
fnvlist_free(arg);
}
/*
* Generic synctask interface for channel program syncfuncs.
*
* To perform some action in syncing context, we'd generally call
* dsl_sync_task(), but since the Lua script is already running inside a
* synctask we need to leave out some actions (such as acquiring the config
* rwlock and performing space checks).
*
* If 'sync' is false, executes a dry run and returns the error code.
*
* If we are not running in syncing context and we are not doing a dry run
* (meaning we are running a zfs.sync function in open-context) then we
* return a Lua error.
*
* This function also handles common fatal error cases for channel program
* library functions. If a fatal error occurs, err_dsname will be the dataset
* name reported in error messages, if supplied.
*/
static int
zcp_sync_task(lua_State *state, dsl_checkfunc_t *checkfunc,
dsl_syncfunc_t *syncfunc, void *arg, boolean_t sync, const char *err_dsname)
{
int err;
zcp_run_info_t *ri = zcp_run_info(state);
err = checkfunc(arg, ri->zri_tx);
if (!sync)
return (err);
if (!ri->zri_sync) {
return (luaL_error(state, "running functions from the zfs.sync "
"submodule requires passing sync=TRUE to "
"lzc_channel_program() (i.e. do not specify the \"-n\" "
"command line argument)"));
}
if (err == 0) {
syncfunc(arg, ri->zri_tx);
} else if (err == EIO) {
if (err_dsname != NULL) {
return (luaL_error(state,
"I/O error while accessing dataset '%s'",
err_dsname));
} else {
return (luaL_error(state,
"I/O error while accessing dataset."));
}
}
return (err);
}
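/*
* For example (illustrative), a channel program can dry-run a destroy
* before committing to it:
*
*     if zfs.check.destroy("rpool/fs@snap") == 0 then
*         zfs.sync.destroy("rpool/fs@snap")
*     end
*
* The zfs.check.* variants are loaded with sync=B_FALSE and therefore only
* run checkfunc; the zfs.sync.* variants also run syncfunc and require the
* channel program itself to have been invoked with sync=TRUE.
*/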
static int zcp_synctask_destroy(lua_State *, boolean_t, nvlist_t *);
static zcp_synctask_info_t zcp_synctask_destroy_info = {
.name = "destroy",
.func = zcp_synctask_destroy,
.pargs = {
{.za_name = "filesystem | snapshot", .za_lua_type = LUA_TSTRING},
{NULL, 0}
},
.kwargs = {
{.za_name = "defer", .za_lua_type = LUA_TBOOLEAN},
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_DESTROY,
.blocks_modified = 0
};
-/* ARGSUSED */
static int
zcp_synctask_destroy(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
const char *dsname = lua_tostring(state, 1);
boolean_t issnap = (strchr(dsname, '@') != NULL);
if (!issnap && !lua_isnil(state, 2)) {
return (luaL_error(state,
"'deferred' kwarg only supported for snapshots: %s",
dsname));
}
if (issnap) {
dsl_destroy_snapshot_arg_t ddsa = { 0 };
ddsa.ddsa_name = dsname;
if (!lua_isnil(state, 2)) {
ddsa.ddsa_defer = lua_toboolean(state, 2);
} else {
ddsa.ddsa_defer = B_FALSE;
}
err = zcp_sync_task(state, dsl_destroy_snapshot_check,
dsl_destroy_snapshot_sync, &ddsa, sync, dsname);
} else {
dsl_destroy_head_arg_t ddha = { 0 };
ddha.ddha_name = dsname;
err = zcp_sync_task(state, dsl_destroy_head_check,
dsl_destroy_head_sync, &ddha, sync, dsname);
}
return (err);
}
static int zcp_synctask_promote(lua_State *, boolean_t, nvlist_t *);
static zcp_synctask_info_t zcp_synctask_promote_info = {
.name = "promote",
.func = zcp_synctask_promote,
.pargs = {
{.za_name = "clone", .za_lua_type = LUA_TSTRING},
{NULL, 0}
},
.kwargs = {
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 3
};
static int
zcp_synctask_promote(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
int err;
dsl_dataset_promote_arg_t ddpa = { 0 };
const char *dsname = lua_tostring(state, 1);
zcp_run_info_t *ri = zcp_run_info(state);
ddpa.ddpa_clonename = dsname;
ddpa.err_ds = err_details;
ddpa.cr = ri->zri_cred;
ddpa.proc = ri->zri_proc;
/*
* If there was a snapshot name conflict, then err_ds will be filled
* with a list of conflicting snapshot names.
*/
err = zcp_sync_task(state, dsl_dataset_promote_check,
dsl_dataset_promote_sync, &ddpa, sync, dsname);
return (err);
}
static int zcp_synctask_rollback(lua_State *, boolean_t, nvlist_t *err_details);
static zcp_synctask_info_t zcp_synctask_rollback_info = {
.name = "rollback",
.func = zcp_synctask_rollback,
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 1,
.pargs = {
{.za_name = "filesystem", .za_lua_type = LUA_TSTRING},
{0, 0}
},
.kwargs = {
{0, 0}
}
};
static int
zcp_synctask_rollback(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
int err;
const char *dsname = lua_tostring(state, 1);
dsl_dataset_rollback_arg_t ddra = { 0 };
ddra.ddra_fsname = dsname;
ddra.ddra_result = err_details;
err = zcp_sync_task(state, dsl_dataset_rollback_check,
dsl_dataset_rollback_sync, &ddra, sync, dsname);
return (err);
}
static int zcp_synctask_snapshot(lua_State *, boolean_t, nvlist_t *);
static zcp_synctask_info_t zcp_synctask_snapshot_info = {
.name = "snapshot",
.func = zcp_synctask_snapshot,
.pargs = {
{.za_name = "filesystem@snapname | volume@snapname",
.za_lua_type = LUA_TSTRING},
{NULL, 0}
},
.kwargs = {
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_NORMAL,
.blocks_modified = 3
};
-/* ARGSUSED */
static int
zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
dsl_dataset_snapshot_arg_t ddsa = { 0 };
const char *dsname = lua_tostring(state, 1);
zcp_run_info_t *ri = zcp_run_info(state);
/*
* On old pools, the ZIL must not be active when a snapshot is created,
* but we can't suspend the ZIL because we're already in syncing
* context.
*/
if (spa_version(ri->zri_pool->dp_spa) < SPA_VERSION_FAST_SNAP) {
return (SET_ERROR(ENOTSUP));
}
/*
* We only allow for a single snapshot rather than a list, so the
* error list output is unnecessary.
*/
ddsa.ddsa_errors = NULL;
ddsa.ddsa_props = NULL;
ddsa.ddsa_cr = ri->zri_cred;
ddsa.ddsa_proc = ri->zri_proc;
ddsa.ddsa_snaps = fnvlist_alloc();
fnvlist_add_boolean(ddsa.ddsa_snaps, dsname);
zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
zcp_synctask_cleanup, ddsa.ddsa_snaps);
err = zcp_sync_task(state, dsl_dataset_snapshot_check,
dsl_dataset_snapshot_sync, &ddsa, sync, dsname);
if (err == 0) {
/*
* We may need to create a new device minor node for this
* dataset (if it is a zvol and the "snapdev" property is set).
* Save it in the nvlist so that it can be processed in open
* context.
*/
fnvlist_add_boolean(ri->zri_new_zvols, dsname);
}
zcp_deregister_cleanup(state, zch);
fnvlist_free(ddsa.ddsa_snaps);
return (err);
}
static int zcp_synctask_inherit_prop(lua_State *, boolean_t,
nvlist_t *err_details);
static zcp_synctask_info_t zcp_synctask_inherit_prop_info = {
.name = "inherit",
.func = zcp_synctask_inherit_prop,
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 2, /* 2 * numprops */
.pargs = {
{ .za_name = "dataset", .za_lua_type = LUA_TSTRING },
{ .za_name = "property", .za_lua_type = LUA_TSTRING },
{ NULL, 0 }
},
.kwargs = {
{ NULL, 0 }
},
};
static int
zcp_synctask_inherit_prop_check(void *arg, dmu_tx_t *tx)
{
zcp_inherit_prop_arg_t *args = arg;
zfs_prop_t prop = zfs_name_to_prop(args->zipa_prop);
if (prop == ZPROP_INVAL) {
if (zfs_prop_user(args->zipa_prop))
return (0);
return (EINVAL);
}
if (zfs_prop_readonly(prop))
return (EINVAL);
if (!zfs_prop_inheritable(prop))
return (EINVAL);
return (dsl_props_set_check(&args->zipa_dpsa, tx));
}
static void
zcp_synctask_inherit_prop_sync(void *arg, dmu_tx_t *tx)
{
zcp_inherit_prop_arg_t *args = arg;
dsl_props_set_arg_t *dpsa = &args->zipa_dpsa;
dsl_props_set_sync(dpsa, tx);
}
static int
zcp_synctask_inherit_prop(lua_State *state, boolean_t sync,
nvlist_t *err_details)
{
+ (void) err_details;
int err;
zcp_inherit_prop_arg_t zipa = { 0 };
dsl_props_set_arg_t *dpsa = &zipa.zipa_dpsa;
const char *dsname = lua_tostring(state, 1);
const char *prop = lua_tostring(state, 2);
zipa.zipa_state = state;
zipa.zipa_prop = prop;
dpsa->dpsa_dsname = dsname;
dpsa->dpsa_source = ZPROP_SRC_INHERITED;
dpsa->dpsa_props = fnvlist_alloc();
fnvlist_add_boolean(dpsa->dpsa_props, prop);
zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
zcp_synctask_cleanup, dpsa->dpsa_props);
err = zcp_sync_task(state, zcp_synctask_inherit_prop_check,
zcp_synctask_inherit_prop_sync, &zipa, sync, dsname);
zcp_deregister_cleanup(state, zch);
fnvlist_free(dpsa->dpsa_props);
return (err);
}
static int zcp_synctask_bookmark(lua_State *, boolean_t, nvlist_t *);
static zcp_synctask_info_t zcp_synctask_bookmark_info = {
.name = "bookmark",
.func = zcp_synctask_bookmark,
.pargs = {
{.za_name = "snapshot | bookmark", .za_lua_type = LUA_TSTRING},
{.za_name = "bookmark", .za_lua_type = LUA_TSTRING},
{NULL, 0}
},
.kwargs = {
{NULL, 0}
},
.space_check = ZFS_SPACE_CHECK_NORMAL,
.blocks_modified = 1,
};
-/* ARGSUSED */
static int
zcp_synctask_bookmark(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
const char *source = lua_tostring(state, 1);
const char *new = lua_tostring(state, 2);
nvlist_t *bmarks = fnvlist_alloc();
fnvlist_add_string(bmarks, new, source);
zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
zcp_synctask_cleanup, bmarks);
dsl_bookmark_create_arg_t dbca = {
.dbca_bmarks = bmarks,
.dbca_errors = NULL,
};
err = zcp_sync_task(state, dsl_bookmark_create_check,
dsl_bookmark_create_sync, &dbca, sync, source);
zcp_deregister_cleanup(state, zch);
fnvlist_free(bmarks);
return (err);
}
static int zcp_synctask_set_prop(lua_State *, boolean_t, nvlist_t *err_details);
static zcp_synctask_info_t zcp_synctask_set_prop_info = {
.name = "set_prop",
.func = zcp_synctask_set_prop,
.space_check = ZFS_SPACE_CHECK_RESERVED,
.blocks_modified = 2,
.pargs = {
{ .za_name = "dataset", .za_lua_type = LUA_TSTRING},
{ .za_name = "property", .za_lua_type = LUA_TSTRING},
{ .za_name = "value", .za_lua_type = LUA_TSTRING},
{ NULL, 0 }
},
.kwargs = {
{ NULL, 0 }
}
};
static int
zcp_synctask_set_prop(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
+ (void) err_details;
int err;
zcp_set_prop_arg_t args = { 0 };
const char *dsname = lua_tostring(state, 1);
const char *prop = lua_tostring(state, 2);
const char *val = lua_tostring(state, 3);
args.state = state;
args.dsname = dsname;
args.prop = prop;
args.val = val;
err = zcp_sync_task(state, zcp_set_prop_check, zcp_set_prop_sync,
&args, sync, dsname);
return (err);
}
static int
zcp_synctask_wrapper(lua_State *state)
{
int err;
zcp_cleanup_handler_t *zch;
int num_ret = 1;
nvlist_t *err_details = fnvlist_alloc();
/*
* Make sure err_details is properly freed, even if a fatal error is
* thrown during the synctask.
*/
zch = zcp_register_cleanup(state, zcp_synctask_cleanup, err_details);
zcp_synctask_info_t *info = lua_touserdata(state, lua_upvalueindex(1));
boolean_t sync = lua_toboolean(state, lua_upvalueindex(2));
zcp_run_info_t *ri = zcp_run_info(state);
dsl_pool_t *dp = ri->zri_pool;
/* MOS space is triple-dittoed, so we multiply by 3. */
uint64_t funcspace =
((uint64_t)info->blocks_modified << DST_AVG_BLKSHIFT) * 3;
zcp_parse_args(state, info->name, info->pargs, info->kwargs);
err = 0;
if (info->space_check != ZFS_SPACE_CHECK_NONE) {
uint64_t quota = dsl_pool_unreserved_space(dp,
info->space_check);
uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes +
ri->zri_space_used;
if (used + funcspace > quota) {
err = SET_ERROR(ENOSPC);
}
}
if (err == 0) {
err = info->func(state, sync, err_details);
}
if (err == 0) {
ri->zri_space_used += funcspace;
}
lua_pushnumber(state, (lua_Number)err);
if (fnvlist_num_pairs(err_details) > 0) {
(void) zcp_nvlist_to_lua(state, err_details, NULL, 0);
num_ret++;
}
zcp_deregister_cleanup(state, zch);
fnvlist_free(err_details);
return (num_ret);
}
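/*
* For example (illustrative), a syncfunc that fills err_details can hand a
* second return value back to the channel program:
*
*     err, details = zfs.sync.promote("rpool/clone")
*
* err is always returned (0 on success); details is nil unless err_details
* was populated, e.g. with conflicting snapshot names from a promote.
*/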
int
zcp_load_synctask_lib(lua_State *state, boolean_t sync)
{
int i;
zcp_synctask_info_t *zcp_synctask_funcs[] = {
&zcp_synctask_destroy_info,
&zcp_synctask_promote_info,
&zcp_synctask_rollback_info,
&zcp_synctask_snapshot_info,
&zcp_synctask_inherit_prop_info,
&zcp_synctask_bookmark_info,
&zcp_synctask_set_prop_info,
NULL
};
lua_newtable(state);
for (i = 0; zcp_synctask_funcs[i] != NULL; i++) {
zcp_synctask_info_t *info = zcp_synctask_funcs[i];
lua_pushlightuserdata(state, info);
lua_pushboolean(state, sync);
lua_pushcclosure(state, &zcp_synctask_wrapper, 2);
lua_setfield(state, -2, info->name);
info++;
}
return (1);
}
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fm.c b/sys/contrib/openzfs/module/zfs/zfs_fm.c
index 60e631567a89..87f793cb4e92 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fm.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fm.c
@@ -1,1458 +1,1527 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012,2021 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
* This general routine is responsible for generating all the different ZFS
* ereports. The payload is dependent on the class, and which arguments are
* supplied to the function:
*
* EREPORT POOL VDEV IO
* block X X X
* data X X
* device X X
* pool X
*
* If we are in a loading state, all errors are chained together by the same
* SPA-wide ENA (Error Numeric Association).
*
* For isolated I/O requests, we get the ENA from the zio_t. The propagation
* gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want
* to chain together all ereports associated with a logical piece of data. For
* read I/Os, there are basically three 'types' of I/O, which form a roughly
* layered diagram:
*
- * +---------------+
+ * +---------------+
* | Aggregate I/O | No associated logical data or device
* +---------------+
* |
* V
* +---------------+ Reads associated with a piece of logical data.
* | Read I/O | This includes reads on behalf of RAID-Z,
* +---------------+ mirrors, gang blocks, retries, etc.
* |
* V
* +---------------+ Reads associated with a particular device, but
* | Physical I/O | no logical data. Issued as part of vdev caching
* +---------------+ and I/O aggregation.
*
* Note that 'physical I/O' here is not the same terminology as used in the rest
* of ZIO. Typically, 'physical I/O' simply means that there is no attached
* block pointer. But I/O with no associated block pointer can still be related
* to a logical piece of data (i.e. RAID-Z requests).
*
* Purely physical I/Os always have unique ENAs. They are not related to a
* particular piece of logical data, and therefore cannot be chained together.
* We still generate an ereport, but the DE doesn't correlate it with any
* logical piece of data. When such an I/O fails, the delegated I/O requests
* will issue a retry, which will trigger the 'real' ereport with the correct
* ENA.
*
* We keep track of the ENA for a ZIO chain through the 'io_logical' member.
* When a new logical I/O is issued, we set this to point to itself. Child I/Os
* then inherit this pointer, so that when it is first set subsequent failures
* will use the same ENA. For vdev cache fill and queue aggregation I/O,
* this pointer is set to NULL, and no ereport will be generated (since it
* doesn't actually correspond to any particular device or piece of data,
* and the caller will always retry without caching or queueing anyway).
*
* For checksum errors, we want to include more information about the actual
* error which occurs. Accordingly, we build an ereport when the error is
* noticed, but instead of sending it in immediately, we hang it off of the
* io_cksum_report field of the logical IO. When the logical IO completes
* (successfully or not), zfs_ereport_finish_checksum() is called with the
* good and bad versions of the buffer (if available), and we annotate the
* ereport with information about the differences.
*/
#ifdef _KERNEL
/*
* Duplicate ereport Detection
*
* Some ereports are retained momentarily for detecting duplicates. These
* are kept in a recent_events_node_t in both a time-ordered list and an AVL
* tree of recent unique ereports.
*
* The lifespan of these recent ereports is bounded (15 mins) and a cleaner
* task is used to purge stale entries.
*/
static list_t recent_events_list;
static avl_tree_t recent_events_tree;
static kmutex_t recent_events_lock;
static taskqid_t recent_events_cleaner_tqid;
/*
* Each node is about 128 bytes so 2,000 would consume 1/4 MiB.
*
* This setting can be changed dynamically and setting it to zero
* disables duplicate detection.
*/
unsigned int zfs_zevent_retain_max = 2000;
/*
* The lifespan for a recent ereport entry. The default of 15 minutes is
* intended to outlive the zfs diagnosis engine's threshold of 10 errors
* over a period of 10 minutes.
*/
unsigned int zfs_zevent_retain_expire_secs = 900;
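/*
* Both settings are exposed as module parameters (zfs_zevent_retain_max and
* zfs_zevent_retain_expire_secs); on Linux builds they can typically be
* adjusted at runtime under /sys/module/zfs/parameters/.
*/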
typedef enum zfs_subclass {
ZSC_IO,
ZSC_DATA,
ZSC_CHECKSUM
} zfs_subclass_t;
typedef struct {
/* common criteria */
uint64_t re_pool_guid;
uint64_t re_vdev_guid;
int re_io_error;
uint64_t re_io_size;
uint64_t re_io_offset;
zfs_subclass_t re_subclass;
zio_priority_t re_io_priority;
/* logical zio criteria (optional) */
zbookmark_phys_t re_io_bookmark;
/* internal state */
avl_node_t re_tree_link;
list_node_t re_list_link;
uint64_t re_timestamp;
} recent_events_node_t;
static int
recent_events_compare(const void *a, const void *b)
{
const recent_events_node_t *node1 = a;
const recent_events_node_t *node2 = b;
int cmp;
/*
* The comparison order here is somewhat arbitrary.
* What's important is that if every criterion matches, then it
* is a duplicate (i.e. compare returns 0).
*/
if ((cmp = TREE_CMP(node1->re_subclass, node2->re_subclass)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_pool_guid, node2->re_pool_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_vdev_guid, node2->re_vdev_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_error, node2->re_io_error)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_priority, node2->re_io_priority)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_size, node2->re_io_size)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_offset, node2->re_io_offset)) != 0)
return (cmp);
const zbookmark_phys_t *zb1 = &node1->re_io_bookmark;
const zbookmark_phys_t *zb2 = &node2->re_io_bookmark;
if ((cmp = TREE_CMP(zb1->zb_objset, zb2->zb_objset)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_object, zb2->zb_object)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_level, zb2->zb_level)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_blkid, zb2->zb_blkid)) != 0)
return (cmp);
return (0);
}
static void zfs_ereport_schedule_cleaner(void);
/*
* background task to clean stale recent event nodes.
*/
-/*ARGSUSED*/
static void
zfs_ereport_cleaner(void *arg)
{
recent_events_node_t *entry;
uint64_t now = gethrtime();
/*
* purge expired entries
*/
mutex_enter(&recent_events_lock);
while ((entry = list_tail(&recent_events_list)) != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
if (age <= zfs_zevent_retain_expire_secs)
break;
/* remove expired node */
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
/* Restart the cleaner if more entries remain */
recent_events_cleaner_tqid = 0;
if (!list_is_empty(&recent_events_list))
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
}
static void
zfs_ereport_schedule_cleaner(void)
{
ASSERT(MUTEX_HELD(&recent_events_lock));
uint64_t timeout = SEC2NSEC(zfs_zevent_retain_expire_secs + 1);
recent_events_cleaner_tqid = taskq_dispatch_delay(
system_delay_taskq, zfs_ereport_cleaner, NULL, TQ_SLEEP,
ddi_get_lbolt() + NSEC_TO_TICK(timeout));
}
/*
* Clear entries for a given vdev or all vdevs in a pool when vdev == NULL
*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
uint64_t vdev_guid, pool_guid;
int cnt = 0;
ASSERT(vd != NULL || spa != NULL);
if (vd == NULL) {
vdev_guid = 0;
pool_guid = spa_guid(spa);
} else {
vdev_guid = vd->vdev_guid;
pool_guid = 0;
}
mutex_enter(&recent_events_lock);
recent_events_node_t *next = list_head(&recent_events_list);
while (next != NULL) {
recent_events_node_t *entry = next;
next = list_next(&recent_events_list, next);
if (entry->re_vdev_guid == vdev_guid ||
entry->re_pool_guid == pool_guid) {
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
cnt++;
}
}
mutex_exit(&recent_events_lock);
}
/*
* Check if an ereport would be a duplicate of one recently posted.
*
* An ereport is considered a duplicate if the set of criteria in
* recent_events_node_t all match.
*
* Only FM_EREPORT_ZFS_IO, FM_EREPORT_ZFS_DATA, and FM_EREPORT_ZFS_CHECKSUM
* are candidates for duplicate checking.
*/
static boolean_t
zfs_ereport_is_duplicate(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t offset, uint64_t size)
{
recent_events_node_t search = {0}, *entry;
if (vd == NULL || zio == NULL)
return (B_FALSE);
if (zfs_zevent_retain_max == 0)
return (B_FALSE);
if (strcmp(subclass, FM_EREPORT_ZFS_IO) == 0)
search.re_subclass = ZSC_IO;
else if (strcmp(subclass, FM_EREPORT_ZFS_DATA) == 0)
search.re_subclass = ZSC_DATA;
else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0)
search.re_subclass = ZSC_CHECKSUM;
else
return (B_FALSE);
search.re_pool_guid = spa_guid(spa);
search.re_vdev_guid = vd->vdev_guid;
search.re_io_error = zio->io_error;
search.re_io_priority = zio->io_priority;
/* if size is supplied use it over what's in zio */
if (size) {
search.re_io_size = size;
search.re_io_offset = offset;
} else {
search.re_io_size = zio->io_size;
search.re_io_offset = zio->io_offset;
}
/* grab optional logical zio criteria */
if (zb != NULL) {
search.re_io_bookmark.zb_objset = zb->zb_objset;
search.re_io_bookmark.zb_object = zb->zb_object;
search.re_io_bookmark.zb_level = zb->zb_level;
search.re_io_bookmark.zb_blkid = zb->zb_blkid;
}
uint64_t now = gethrtime();
mutex_enter(&recent_events_lock);
/* check if we have seen this one recently */
entry = avl_find(&recent_events_tree, &search, NULL);
if (entry != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
/*
* There is still an active cleaner (since we're here).
* Reset the last seen time for this duplicate entry
* so that its lifespan gets extended.
*/
list_remove(&recent_events_list, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
zfs_zevent_track_duplicate();
mutex_exit(&recent_events_lock);
return (age <= zfs_zevent_retain_expire_secs);
}
if (avl_numnodes(&recent_events_tree) >= zfs_zevent_retain_max) {
/* recycle oldest node */
entry = list_tail(&recent_events_list);
ASSERT(entry != NULL);
list_remove(&recent_events_list, entry);
avl_remove(&recent_events_tree, entry);
} else {
entry = kmem_alloc(sizeof (recent_events_node_t), KM_SLEEP);
}
/* record this as a recent ereport */
*entry = search;
avl_add(&recent_events_tree, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
/* Start a cleaner if not already scheduled */
if (recent_events_cleaner_tqid == 0)
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
return (B_FALSE);
}
void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
if (nvl)
fm_nvlist_destroy(nvl, FM_NVA_FREE);
if (detector)
fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/*
* We want to rate limit ZIO delay, deadman, and checksum events so as to not
* flood zevent consumers when a disk is acting up.
*
* Returns 1 if we're ratelimiting, 0 if not.
*/
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
int rc = 0;
/*
* zfs_ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
* are. Invert it to get our return value.
*/
if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
rc = !zfs_ratelimit(&vd->vdev_delay_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_DEADMAN) == 0) {
rc = !zfs_ratelimit(&vd->vdev_deadman_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
}
if (rc) {
/* We're rate limiting */
fm_erpt_dropped_increment();
}
return (rc);
}
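/*
 * Minimal sketch (user space, hypothetical names): a fixed-window rate
 * limiter that, like zfs_ratelimit(), returns 1 when the caller may
 * proceed and 0 when it is being limited, so the function above has to
 * invert the result.
 */
#include <time.h>

static int
ratelimit_allow(time_t *window_start, unsigned *count,
    unsigned burst, unsigned interval_secs)
{
    time_t now = time(NULL);

    if (now - *window_start >= (time_t)interval_secs) {
        *window_start = now;   /* start a new window */
        *count = 0;
    }
    if (*count >= burst)
        return (0);            /* limited */
    (*count)++;
    return (1);                /* allowed */
}

/* A caller mirroring zfs_is_ratelimiting_event() inverts the sense: */
/* int limiting = !ratelimit_allow(&win, &cnt, 20, 60); */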
/*
* Return B_TRUE if the event actually posted, B_FALSE if not.
*/
static boolean_t
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
const char *subclass, spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
zio_t *zio, uint64_t stateoroffset, uint64_t size)
{
nvlist_t *ereport, *detector;
uint64_t ena;
char class[64];
if ((ereport = fm_nvlist_create(NULL)) == NULL)
return (B_FALSE);
if ((detector = fm_nvlist_create(NULL)) == NULL) {
fm_nvlist_destroy(ereport, FM_NVA_FREE);
return (B_FALSE);
}
/*
* Serialize ereport generation
*/
mutex_enter(&spa->spa_errlist_lock);
/*
* Determine the ENA to use for this event. If we are in a loading
* state, use a SPA-wide ENA. Otherwise, if we are in an I/O state, use
* a root zio-wide ENA. Otherwise, simply use a unique ENA.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE) {
if (spa->spa_ena == 0)
spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
ena = spa->spa_ena;
} else if (zio != NULL && zio->io_logical != NULL) {
if (zio->io_logical->io_ena == 0)
zio->io_logical->io_ena =
fm_ena_generate(0, FM_ENA_FMT1);
ena = zio->io_logical->io_ena;
} else {
ena = fm_ena_generate(0, FM_ENA_FMT1);
}
/*
* Construct the full class, detector, and other standard FMA fields.
*/
(void) snprintf(class, sizeof (class), "%s.%s",
ZFS_ERROR_CLASS, subclass);
fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
vd != NULL ? vd->vdev_guid : 0);
fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);
/*
* Construct the per-ereport payload, depending on which parameters are
* passed in.
*/
/*
* Generic payload members common to all ereports.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_POOL, DATA_TYPE_STRING, spa_name(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, DATA_TYPE_UINT64, spa_guid(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, DATA_TYPE_UINT64,
(uint64_t)spa_state(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
(int32_t)spa_load_state(spa), NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
DATA_TYPE_STRING,
spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
FM_EREPORT_FAILMODE_WAIT :
spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
NULL);
if (vd != NULL) {
vdev_t *pvd = vd->vdev_parent;
vdev_queue_t *vq = &vd->vdev_queue;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_t *spare_vd;
uint64_t *spare_guids;
char **spare_paths;
int i, spare_count;
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
DATA_TYPE_UINT64, vd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
if (vd->vdev_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
DATA_TYPE_STRING, vd->vdev_path, NULL);
if (vd->vdev_devid != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
DATA_TYPE_STRING, vd->vdev_devid, NULL);
if (vd->vdev_fru != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
DATA_TYPE_STRING, vd->vdev_fru, NULL);
if (vd->vdev_enc_sysfs_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);
if (vd->vdev_ashift)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
DATA_TYPE_UINT64, vd->vdev_ashift, NULL);
if (vq != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
}
if (vs != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
DATA_TYPE_UINT64, vs->vs_read_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
DATA_TYPE_UINT64, vs->vs_write_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
DATA_TYPE_UINT64, vs->vs_checksum_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS,
DATA_TYPE_UINT64, vs->vs_slow_ios,
NULL);
}
if (pvd != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
DATA_TYPE_UINT64, pvd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
NULL);
if (pvd->vdev_path)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
DATA_TYPE_STRING, pvd->vdev_path, NULL);
if (pvd->vdev_devid)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
DATA_TYPE_STRING, pvd->vdev_devid, NULL);
}
spare_count = spa->spa_spares.sav_count;
spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
KM_SLEEP);
spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
KM_SLEEP);
for (i = 0; i < spare_count; i++) {
spare_vd = spa->spa_spares.sav_vdevs[i];
if (spare_vd) {
spare_paths[i] = spare_vd->vdev_path;
spare_guids[i] = spare_vd->vdev_guid;
}
}
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);
kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
kmem_free(spare_paths, sizeof (char *) * spare_count);
}
if (zio != NULL) {
/*
* Payload common to all I/Os.
*/
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
DATA_TYPE_INT32, zio->io_error, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
DATA_TYPE_INT32, zio->io_flags, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
DATA_TYPE_UINT32, zio->io_stage, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
DATA_TYPE_UINT32, zio->io_pipeline, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
DATA_TYPE_UINT64, zio->io_delay, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
DATA_TYPE_UINT64, zio->io_timestamp, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
DATA_TYPE_UINT64, zio->io_delta, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY,
DATA_TYPE_UINT32, zio->io_priority, NULL);
/*
* If the 'size' parameter is non-zero, it indicates this is a
* RAID-Z or other I/O where the physical offset and length are
* provided for us, instead of within the zio_t.
*/
if (vd != NULL) {
if (size)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, stateoroffset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, size, NULL);
else
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, zio->io_offset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, zio->io_size, NULL);
}
} else if (vd != NULL) {
/*
* If we have a vdev but no zio, this is a device fault, and the
* 'stateoroffset' parameter indicates the previous state of the
* vdev.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
DATA_TYPE_UINT64, stateoroffset, NULL);
}
/*
* Payload for I/Os with corresponding logical information.
*/
if (zb != NULL && (zio == NULL || zio->io_logical != NULL)) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
DATA_TYPE_UINT64, zb->zb_objset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
DATA_TYPE_UINT64, zb->zb_object,
FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
DATA_TYPE_INT64, zb->zb_level,
FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
DATA_TYPE_UINT64, zb->zb_blkid, NULL);
}
mutex_exit(&spa->spa_errlist_lock);
*ereport_out = ereport;
*detector_out = detector;
return (B_TRUE);
}
/* if it's <= 128 bytes, save the corruption directly */
#define ZFM_MAX_INLINE (128 / sizeof (uint64_t))
#define MAX_RANGES 16
typedef struct zfs_ecksum_info {
/* histograms of set and cleared bits by bit number in a 64-bit word */
uint32_t zei_histogram_set[sizeof (uint64_t) * NBBY];
uint32_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
/* inline arrays of bits set and cleared. */
uint64_t zei_bits_set[ZFM_MAX_INLINE];
uint64_t zei_bits_cleared[ZFM_MAX_INLINE];
/*
* for each range, the number of bits set and cleared. The Hamming
* distance between the good and bad buffers is the sum of them all.
*/
uint32_t zei_range_sets[MAX_RANGES];
uint32_t zei_range_clears[MAX_RANGES];
struct zei_ranges {
uint32_t zr_start;
uint32_t zr_end;
} zei_ranges[MAX_RANGES];
size_t zei_range_count;
uint32_t zei_mingap;
uint32_t zei_allowed_mingap;
} zfs_ecksum_info_t;
static void
update_histogram(uint64_t value_arg, uint32_t *hist, uint32_t *count)
{
size_t i;
size_t bits = 0;
uint64_t value = BE_64(value_arg);
/* We store the bits in big-endian (largest-first) order */
for (i = 0; i < 64; i++) {
if (value & (1ull << i)) {
hist[63 - i]++;
++bits;
}
}
/* update the count of bits changed */
*count += bits;
}
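/*
 * Sketch (user space) of the per-bit histogram kept above: for a word of
 * differing bits, bump a counter per bit position, most significant bit
 * first.  The kernel version additionally normalizes the word to
 * big-endian before counting; that step is omitted here.
 */
#include <stdint.h>

static void
count_bits_msb_first(uint64_t diff, uint32_t hist[64], uint32_t *total)
{
    for (int i = 0; i < 64; i++) {
        if (diff & (1ULL << i)) {
            hist[63 - i]++;   /* index 0 is the most significant bit */
            (*total)++;
        }
    }
}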
/*
* We've now filled up the range array, and need to increase "mingap" and
* shrink the range list accordingly. zei_mingap is always the smallest
* distance between array entries, so we set the new_allowed_gap to be
* one greater than that. We then go through the list, joining together
* any ranges which are closer than the new_allowed_gap.
*
* By construction, there will be at least one. We also update zei_mingap
* to the new smallest gap, to prepare for our next invocation.
*/
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
uint32_t mingap = UINT32_MAX;
uint32_t new_allowed_gap = eip->zei_mingap + 1;
size_t idx, output;
size_t max = eip->zei_range_count;
struct zei_ranges *r = eip->zei_ranges;
ASSERT3U(eip->zei_range_count, >, 0);
ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);
output = idx = 0;
while (idx < max - 1) {
uint32_t start = r[idx].zr_start;
uint32_t end = r[idx].zr_end;
while (idx < max - 1) {
idx++;
uint32_t nstart = r[idx].zr_start;
uint32_t nend = r[idx].zr_end;
uint32_t gap = nstart - end;
if (gap < new_allowed_gap) {
end = nend;
continue;
}
if (gap < mingap)
mingap = gap;
break;
}
r[output].zr_start = start;
r[output].zr_end = end;
output++;
}
ASSERT3U(output, <, eip->zei_range_count);
eip->zei_range_count = output;
eip->zei_mingap = mingap;
eip->zei_allowed_mingap = new_allowed_gap;
}
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
if (count >= MAX_RANGES) {
zei_shrink_ranges(eip);
count = eip->zei_range_count;
}
if (count == 0) {
eip->zei_mingap = UINT32_MAX;
eip->zei_allowed_mingap = 1;
} else {
int gap = start - r[count - 1].zr_end;
if (gap < eip->zei_allowed_mingap) {
r[count - 1].zr_end = end;
return;
}
if (gap < eip->zei_mingap)
eip->zei_mingap = gap;
}
r[count].zr_start = start;
r[count].zr_end = end;
eip->zei_range_count++;
}
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
size_t result = 0;
size_t idx;
for (idx = 0; idx < count; idx++)
result += (r[idx].zr_end - r[idx].zr_start);
return (result);
}
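/*
 * Illustrative sketch (user space): the range bookkeeping above keeps at
 * most MAX_RANGES [start, end) word ranges and, once the table fills,
 * merges neighbours separated by less than the currently allowed gap.
 * A simplified one-shot merge over an already-sorted range array:
 */
#include <stddef.h>
#include <stdint.h>

struct word_range {
    uint32_t start;
    uint32_t end;
};

/* Merge adjacent ranges whose gap is below allowed_gap; return new count. */
static size_t
merge_ranges(struct word_range *r, size_t n, uint32_t allowed_gap)
{
    size_t out = 0;

    for (size_t i = 0; i < n; i++) {
        if (out > 0 && r[i].start - r[out - 1].end < allowed_gap)
            r[out - 1].end = r[i].end;   /* close enough: join */
        else
            r[out++] = r[i];
    }
    return (out);
}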
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
const abd_t *goodabd, const abd_t *badabd, size_t size,
boolean_t drop_if_identical)
{
const uint64_t *good;
const uint64_t *bad;
uint64_t allset = 0;
uint64_t allcleared = 0;
size_t nui64s = size / sizeof (uint64_t);
size_t inline_size;
int no_inline = 0;
size_t idx;
size_t range;
size_t offset = 0;
ssize_t start = -1;
zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);
/* don't do any annotation for injected checksum errors */
if (info != NULL && info->zbc_injected)
return (eip);
if (info != NULL && info->zbc_has_cksum) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
DATA_TYPE_UINT64_ARRAY,
sizeof (info->zbc_expected) / sizeof (uint64_t),
(uint64_t *)&info->zbc_expected,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
DATA_TYPE_UINT64_ARRAY,
sizeof (info->zbc_actual) / sizeof (uint64_t),
(uint64_t *)&info->zbc_actual,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
DATA_TYPE_STRING,
info->zbc_checksum_name,
NULL);
if (info->zbc_byteswapped) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
DATA_TYPE_BOOLEAN, 1,
NULL);
}
}
if (badabd == NULL || goodabd == NULL)
return (eip);
ASSERT3U(nui64s, <=, UINT32_MAX);
ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, <=, UINT32_MAX);
good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);
/* build up the range list by comparing the two buffers. */
for (idx = 0; idx < nui64s; idx++) {
if (good[idx] == bad[idx]) {
if (start == -1)
continue;
zei_add_range(eip, start, idx);
start = -1;
} else {
if (start != -1)
continue;
start = idx;
}
}
if (start != -1)
zei_add_range(eip, start, idx);
/* See if it will fit in our inline buffers */
inline_size = zei_range_total_size(eip);
if (inline_size > ZFM_MAX_INLINE)
no_inline = 1;
/*
* If there is no change and we want to drop if the buffers are
* identical, do so.
*/
if (inline_size == 0 && drop_if_identical) {
kmem_free(eip, sizeof (*eip));
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
return (NULL);
}
/*
* Now walk through the ranges, filling in the details of the
* differences. Also convert our uint64_t-array offsets to byte
* offsets.
*/
for (range = 0; range < eip->zei_range_count; range++) {
size_t start = eip->zei_ranges[range].zr_start;
size_t end = eip->zei_ranges[range].zr_end;
for (idx = start; idx < end; idx++) {
uint64_t set, cleared;
// bits set in bad, but not in good
set = ((~good[idx]) & bad[idx]);
// bits set in good, but not in bad
cleared = (good[idx] & (~bad[idx]));
allset |= set;
allcleared |= cleared;
if (!no_inline) {
ASSERT3U(offset, <, inline_size);
eip->zei_bits_set[offset] = set;
eip->zei_bits_cleared[offset] = cleared;
offset++;
}
update_histogram(set, eip->zei_histogram_set,
&eip->zei_range_sets[range]);
update_histogram(cleared, eip->zei_histogram_cleared,
&eip->zei_range_clears[range]);
}
/* convert to byte offsets */
eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
}
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
eip->zei_allowed_mingap *= sizeof (uint64_t);
inline_size *= sizeof (uint64_t);
/* fill in ereport */
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
(uint32_t *)eip->zei_ranges,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
DATA_TYPE_UINT32, eip->zei_allowed_mingap,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
NULL);
if (!no_inline) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_cleared,
NULL);
} else {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
DATA_TYPE_UINT32_ARRAY,
NBBY * sizeof (uint64_t), eip->zei_histogram_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
DATA_TYPE_UINT32_ARRAY,
NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
NULL);
}
return (eip);
}
#else
-/*ARGSUSED*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
+ (void) spa, (void) vd;
}
#endif
/*
* Make sure our event is still valid for the given zio/vdev/pool. For example,
* we don't want to keep logging events for a faulted or missing vdev.
*/
boolean_t
zfs_ereport_is_valid(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio)
{
#ifdef _KERNEL
/*
* If we are doing a spa_tryimport() or in recovery mode,
* ignore errors.
*/
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER)
return (B_FALSE);
/*
* If we are in the middle of opening a pool, and the previous attempt
* failed, don't bother logging any new ereports - we're just going to
* get the same diagnosis anyway.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE &&
spa->spa_last_open_failed)
return (B_FALSE);
if (zio != NULL) {
/*
* If this is not a read or write zio, ignore the error. This
* can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
*/
if (zio->io_type != ZIO_TYPE_READ &&
zio->io_type != ZIO_TYPE_WRITE)
return (B_FALSE);
if (vd != NULL) {
/*
* If the vdev has already been marked as failing due
* to a failed probe, then ignore any subsequent I/O
* errors, as the DE will automatically fault the vdev
* on the first such failure. This also catches cases
* where vdev_remove_wanted is set and the device has
* not yet been asynchronously placed into the REMOVED
* state.
*/
if (zio->io_vd == vd && !vdev_accessible(vd, zio))
return (B_FALSE);
/*
* Ignore checksum errors for reads from DTL regions of
* leaf vdevs.
*/
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_error == ECKSUM &&
vd->vdev_ops->vdev_op_leaf &&
vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
return (B_FALSE);
}
}
/*
* For probe failure, we want to avoid posting ereports if we've
* already removed the device in the meantime.
*/
if (vd != NULL &&
strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
(vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
return (B_FALSE);
/* Ignore bogus delay events (like from ioctls or unqueued IOs) */
if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
(zio != NULL) && (!zio->io_timestamp)) {
return (B_FALSE);
}
+#else
+ (void) subclass, (void) spa, (void) vd, (void) zio;
#endif
return (B_TRUE);
}
/*
* Post an ereport for the given subclass
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
if (!zfs_ereport_is_valid(subclass, spa, vd, zio))
return (EINVAL);
if (zfs_ereport_is_duplicate(subclass, spa, vd, zb, zio, 0, 0))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(subclass, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, subclass, spa, vd,
zb, zio, state, 0))
return (SET_ERROR(EINVAL)); /* couldn't post event */
if (ereport == NULL)
return (SET_ERROR(EINVAL));
/* Cleanup is handled by the callback function */
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
+#else
+ (void) subclass, (void) spa, (void) vd, (void) zb, (void) zio,
+ (void) state;
#endif
return (rc);
}
/*
* Prepare a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length, zio_bad_cksum_t *info)
{
zio_cksum_report_t *report;
#ifdef _KERNEL
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
+#else
+ (void) zb, (void) offset;
#endif
report = kmem_zalloc(sizeof (*report), KM_SLEEP);
zio_vsd_default_cksum_report(zio, report);
/* copy the checksum failure information if it was provided */
if (info != NULL) {
report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
bcopy(info, report->zcr_ckinfo, sizeof (*info));
}
report->zcr_sector = 1ULL << vd->vdev_top->vdev_ashift;
report->zcr_align =
vdev_psize_to_asize(vd->vdev_top, report->zcr_sector);
report->zcr_length = length;
#ifdef _KERNEL
(void) zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio, offset, length);
if (report->zcr_ereport == NULL) {
zfs_ereport_free_checksum(report);
return (0);
}
#endif
mutex_enter(&spa->spa_errlist_lock);
report->zcr_next = zio->io_logical->io_cksum_report;
zio->io_logical->io_cksum_report = report;
mutex_exit(&spa->spa_errlist_lock);
return (0);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
const abd_t *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
zfs_ecksum_info_t *info;
info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
good_data, bad_data, report->zcr_length, drop_if_identical);
if (info != NULL)
zfs_zevent_post(report->zcr_ereport,
report->zcr_detector, zfs_zevent_post_cb);
else
zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);
report->zcr_ereport = report->zcr_detector = NULL;
if (info != NULL)
kmem_free(info, sizeof (*info));
+#else
+ (void) report, (void) good_data, (void) bad_data,
+ (void) drop_if_identical;
#endif
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
if (rpt->zcr_ereport != NULL) {
fm_nvlist_destroy(rpt->zcr_ereport,
FM_NVA_FREE);
fm_nvlist_destroy(rpt->zcr_detector,
FM_NVA_FREE);
}
#endif
rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);
if (rpt->zcr_ckinfo != NULL)
kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));
kmem_free(rpt, sizeof (*rpt));
}
/*
* Post a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length,
const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
zfs_ecksum_info_t *info;
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, FM_EREPORT_ZFS_CHECKSUM,
spa, vd, zb, zio, offset, length) || (ereport == NULL)) {
return (SET_ERROR(EINVAL));
}
info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
B_FALSE);
if (info != NULL) {
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
kmem_free(info, sizeof (*info));
}
+#else
+ (void) spa, (void) vd, (void) zb, (void) zio, (void) offset,
+ (void) length, (void) good_data, (void) bad_data, (void) zbc;
#endif
return (rc);
}
/*
* The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
* change in the pool. All sysevents are listed in sys/sysevent/eventdefs.h
* and are designed to be consumed by the ZFS Event Daemon (ZED). For
* additional details refer to the zed(8) man page.
*/
nvlist_t *
zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
nvlist_t *resource = NULL;
#ifdef _KERNEL
char class[64];
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
return (NULL);
if ((resource = fm_nvlist_create(NULL)) == NULL)
return (NULL);
(void) snprintf(class, sizeof (class), "%s.%s.%s", type,
ZFS_ERROR_CLASS, name);
VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, spa_state(spa)));
VERIFY0(nvlist_add_int32(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));
if (vd) {
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
if (vd->vdev_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
if (vd->vdev_devid != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
if (vd->vdev_fru != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
if (vd->vdev_enc_sysfs_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path));
}
/* also copy any optional payload data */
if (aux) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
(void) nvlist_add_nvpair(resource, elem);
}
-
+#else
+ (void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
return (resource);
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, type, name, aux);
if (resource)
zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
+#else
+ (void) spa, (void) vd, (void) type, (void) name, (void) aux;
#endif
}
/*
* The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
* has been removed from the system. This will cause the DE to ignore any
* recent I/O errors, inferring that they are due to the asynchronous device
* removal.
*/
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}
/*
* The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
* has the 'autoreplace' property set, and therefore any broken vdevs will be
* handled by higher level logic, and no vdev fault should be generated.
*/
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}
/*
* The 'resource.fs.zfs.statechange' event is an internal signal that the
* given vdev has transitioned its state to DEGRADED or HEALTHY. This will
* cause the retire agent to repair any outstanding fault management cases
* open because the device was not found (fault.fs.zfs.device).
*/
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
#ifdef _KERNEL
nvlist_t *aux;
/*
* Add optional supplemental keys to payload
*/
aux = fm_nvlist_create(NULL);
if (vd && aux) {
if (vd->vdev_physpath) {
(void) nvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
vd->vdev_physpath);
}
if (vd->vdev_enc_sysfs_path) {
(void) nvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
}
(void) nvlist_add_uint64(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
}
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
aux);
if (aux)
fm_nvlist_destroy(aux, FM_NVA_FREE);
+#else
+ (void) spa, (void) vd, (void) laststate;
#endif
}
#ifdef _KERNEL
void
zfs_ereport_init(void)
{
mutex_init(&recent_events_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&recent_events_list, sizeof (recent_events_node_t),
offsetof(recent_events_node_t, re_list_link));
avl_create(&recent_events_tree, recent_events_compare,
sizeof (recent_events_node_t), offsetof(recent_events_node_t,
re_tree_link));
}
/*
* This 'early' fini needs to run before zfs_fini(), which on Linux waits
* for the system_delay_taskq to drain.
*/
void
zfs_ereport_taskq_fini(void)
{
mutex_enter(&recent_events_lock);
if (recent_events_cleaner_tqid != 0) {
taskq_cancel_id(system_delay_taskq, recent_events_cleaner_tqid);
recent_events_cleaner_tqid = 0;
}
mutex_exit(&recent_events_lock);
}
void
zfs_ereport_fini(void)
{
recent_events_node_t *entry;
while ((entry = list_head(&recent_events_list)) != NULL) {
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
avl_destroy(&recent_events_tree);
list_destroy(&recent_events_list);
mutex_destroy(&recent_events_lock);
}
+void
+zfs_ereport_snapshot_post(const char *subclass, spa_t *spa, const char *name)
+{
+ nvlist_t *aux;
+
+ aux = fm_nvlist_create(NULL);
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME, name);
+
+ zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
+ fm_nvlist_destroy(aux, FM_NVA_FREE);
+}
+
+/*
+ * Post an event when a zvol is created or removed
+ *
+ * This is currently only used by macOS, since it uses the event to create
+ * symlinks between the volume name (mypool/myvol) and the actual /dev
+ * device (/dev/disk3). For example:
+ *
+ * /var/run/zfs/dsk/mypool/myvol -> /dev/disk3
+ *
+ * name: The full name of the zvol ("mypool/myvol")
+ * dev_name: The full /dev name for the zvol ("/dev/disk3")
+ * raw_name: The raw /dev name for the zvol ("/dev/rdisk3")
+ */
+void
+zfs_ereport_zvol_post(const char *subclass, const char *name,
+ const char *dev_name, const char *raw_name)
+{
+ nvlist_t *aux;
+ char *r;
+
+ boolean_t locked = mutex_owned(&spa_namespace_lock);
+ if (!locked) mutex_enter(&spa_namespace_lock);
+ spa_t *spa = spa_lookup(name);
+ if (!locked) mutex_exit(&spa_namespace_lock);
+
+ if (spa == NULL)
+ return;
+
+ aux = fm_nvlist_create(NULL);
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME, dev_name);
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME,
+ raw_name);
+ r = strchr(name, '/');
+ if (r && r[1])
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_VOLUME, &r[1]);
+
+ zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
+ fm_nvlist_destroy(aux, FM_NVA_FREE);
+}
+
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_is_valid);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_max, UINT, ZMOD_RW,
"Maximum recent zevents records to retain for duplicate checking");
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_expire_secs, UINT, ZMOD_RW,
"Expiration time for recent zevents records");
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/zfs/zfs_vnops.c b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
index 54749810d45d..1b32d3876724 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_vnops.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_vnops.c
@@ -1,925 +1,982 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/uio_impl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/policy.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
static ulong_t zfs_fsync_sync_cnt = 4;
int
zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zil_commit(zfsvfs->z_log, zp->z_id);
ZFS_EXIT(zfsvfs);
}
tsd_set(zfs_fsyncer_key, NULL);
return (0);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
* Lseek support for finding holes (cmd == SEEK_HOLE) and
* data (cmd == SEEK_DATA). "off" is an in/out parameter.
*/
static int
zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfs_locked_range_t *lr;
uint64_t noff = (uint64_t)*off; /* new offset */
uint64_t file_sz;
int error;
boolean_t hole;
file_sz = zp->z_size;
if (noff >= file_sz) {
return (SET_ERROR(ENXIO));
}
if (cmd == F_SEEK_HOLE)
hole = B_TRUE;
else
hole = B_FALSE;
/* Flush any mmap()'d data to disk */
if (zn_has_cached_data(zp))
zn_flush_cached_data(zp, B_FALSE);
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, file_sz, RL_READER);
error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
zfs_rangelock_exit(lr);
if (error == ESRCH)
return (SET_ERROR(ENXIO));
/* File was dirty, so fall back to using generic logic */
if (error == EBUSY) {
if (hole)
*off = file_sz;
return (0);
}
/*
* We could find a hole that begins after the logical end-of-file,
* because dmu_offset_next() only works on whole blocks. If the
* EOF falls mid-block, then indicate that the "virtual hole"
* at the end of the file begins at the logical EOF, rather than
* at the end of the last block.
*/
if (noff > file_sz) {
ASSERT(hole);
noff = file_sz;
}
if (noff < *off)
return (error);
*off = noff;
return (error);
}
int
zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_holey_common(zp, cmd, off);
ZFS_EXIT(zfsvfs);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
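/*
 * Usage sketch (user space, illustrative only): how an application would
 * exercise the hole/data support above on a platform that defines
 * SEEK_HOLE and SEEK_DATA, walking a file's data regions in order.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static void
print_data_regions(const char *path)
{
    int fd = open(path, O_RDONLY);
    off_t off = 0, hole;

    if (fd < 0)
        return;
    while ((hole = lseek(fd, off, SEEK_HOLE)) >= 0) {
        printf("data [%lld, %lld)\n", (long long)off, (long long)hole);
        off = lseek(fd, hole, SEEK_DATA);
        if (off < 0)
            break;   /* no data past this hole (ENXIO) */
    }
    close(fd);
}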
/*ARGSUSED*/
int
zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (flag & V_ACE_MASK)
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
else
error = zfs_zaccess_rwx(zp, mode, flag, cr);
ZFS_EXIT(zfsvfs);
return (error);
}
static unsigned long zfs_vnops_read_chunk_size = 1024 * 1024; /* Tunable */
/*
* Read bytes from specified file into supplied buffer.
*
* IN: zp - inode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - O_SYNC flags; used to provide FRSYNC semantics.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range, buffer filled.
*
* RETURN: 0 on success, error code on failure.
*
* Side Effects:
* inode - atime updated if byte count > 0
*/
/* ARGSUSED */
int
zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
int error = 0;
boolean_t frsync = B_FALSE;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
/* We don't copy out anything useful for directories. */
if (Z_ISDIR(ZTOTYPE(zp))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EISDIR));
}
/*
* Validate file offset
*/
if (zfs_uio_offset(uio) < (offset_t)0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Fasttrack empty reads
*/
if (zfs_uio_resid(uio) == 0) {
ZFS_EXIT(zfsvfs);
return (0);
}
#ifdef FRSYNC
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
* Only do this for non-snapshots.
*
* Some platforms do not support FRSYNC and instead map it
* to O_SYNC, which results in unnecessary calls to zil_commit. We
* only honor FRSYNC requests on platforms which support it.
*/
frsync = !!(ioflag & FRSYNC);
#endif
if (zfsvfs->z_log &&
(frsync || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zfsvfs->z_log, zp->z_id);
/*
* Lock the range against changes.
*/
zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
zfs_uio_offset(uio), zfs_uio_resid(uio), RL_READER);
/*
* If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime.
*/
if (zfs_uio_offset(uio) >= zp->z_size) {
error = 0;
goto out;
}
ASSERT(zfs_uio_offset(uio) < zp->z_size);
ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
ssize_t start_resid = n;
while (n > 0) {
ssize_t nbytes = MIN(n, zfs_vnops_read_chunk_size -
P2PHASE(zfs_uio_offset(uio), zfs_vnops_read_chunk_size));
#ifdef UIO_NOCOPY
if (zfs_uio_segflg(uio) == UIO_NOCOPY)
error = mappedread_sf(zp, nbytes, uio);
else
#endif
if (zn_has_cached_data(zp) && !(ioflag & O_DIRECT)) {
error = mappedread(zp, nbytes, uio);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes);
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
n -= nbytes;
}
int64_t nread = start_resid - n;
dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
task_io_account_read(nread);
out:
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
return (error);
}
+static void
+zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
+ uint64_t *clear_setid_bits_txgp, dmu_tx_t *tx)
+{
+ zilog_t *zilog = zfsvfs->z_log;
+ const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
+
+ ASSERT(clear_setid_bits_txgp != NULL);
+ ASSERT(tx != NULL);
+
+ /*
+ * Clear Set-UID/Set-GID bits on successful write if not
+ * privileged and at least one of the execute bits is set.
+ *
+ * It would be nice to do this after all writes have
+ * been done, but that would still expose the ISUID/ISGID
+ * to another app after the partial write is committed.
+ *
+ * Note: we don't call zfs_fuid_map_id() here because
+ * user 0 is not an ephemeral uid.
+ */
+ mutex_enter(&zp->z_acl_lock);
+ if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
+ (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
+ secpolicy_vnode_setid_retain(zp, cr,
+ ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
+ uint64_t newmode;
+
+ zp->z_mode &= ~(S_ISUID | S_ISGID);
+ newmode = zp->z_mode;
+ (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
+ (void *)&newmode, sizeof (uint64_t), tx);
+
+ mutex_exit(&zp->z_acl_lock);
+
+ /*
+ * Make sure SUID/SGID bits will be removed when we replay the
+ * log. If the setid bits keep coming back, don't log more
+ * than one TX_SETATTR per transaction group.
+ */
+ if (*clear_setid_bits_txgp != dmu_tx_get_txg(tx)) {
+ vattr_t va;
+
+ bzero(&va, sizeof (va));
+ va.va_mask = AT_MODE;
+ va.va_nodeid = zp->z_id;
+ va.va_mode = newmode;
+ zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va, AT_MODE,
+ NULL);
+ *clear_setid_bits_txgp = dmu_tx_get_txg(tx);
+ }
+ } else {
+ mutex_exit(&zp->z_acl_lock);
+ }
+}
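/*
 * Sketch (user space, hypothetical helper) of the mode rule applied
 * above: after an unprivileged write, drop the set-id bits whenever any
 * execute bit is present, which is the per-transaction check made by
 * zfs_clear_setid_bits_if_necessary().
 */
#include <sys/stat.h>

static mode_t
mode_after_unprivileged_write(mode_t mode)
{
    if ((mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
        (mode & (S_ISUID | S_ISGID)) != 0)
        mode &= ~(S_ISUID | S_ISGID);
    return (mode);
}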
+
/*
* Write the bytes to a file.
*
* IN: zp - znode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - O_APPEND flag set if in append mode.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated if byte count > 0
*/
/* ARGSUSED */
int
zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
{
int error = 0, error1;
ssize_t start_resid = zfs_uio_resid(uio);
+ uint64_t clear_setid_bits_txg = 0;
/*
* Fasttrack empty write
*/
ssize_t n = start_resid;
if (n == 0)
return (0);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
sa_bulk_attr_t bulk[4];
int count = 0;
uint64_t mtime[2], ctime[2];
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM.
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common()
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
(zfs_uio_offset(uio) < zp->z_size))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/*
* Validate file offset
*/
offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
if (woff < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
const uint64_t max_blksz = zfsvfs->z_max_blksz;
/*
* Pre-fault the pages to ensure slow (e.g. NFS) pages
* don't hold up txg.
* Skip this if uio contains loaned arc_buf.
*/
if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFAULT));
}
/*
* If in append mode, set the io offset pointer to eof.
*/
zfs_locked_range_t *lr;
if (ioflag & O_APPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
woff = lr->lr_offset;
if (lr->lr_length == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
* Note that zp_size cannot change with this lock held.
*/
woff = zp->z_size;
}
zfs_uio_setoffset(uio, woff);
} else {
/*
* Note that if the file block size will change as a result of
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
}
if (zn_rlimit_fsize(zp, uio)) {
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFBIG));
}
const rlim64_t limit = MAXOFFSET_T;
if (woff >= limit) {
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFBIG));
}
if (n > limit - woff)
n = limit - woff;
uint64_t end_size = MAX(zp->z_size, woff + n);
zilog_t *zilog = zfsvfs->z_log;
const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
const uint64_t projid = zp->z_projid;
/*
* Write the file in reasonable size chunks. Each chunk is written
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
woff = zfs_uio_offset(uio);
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, gid) ||
(projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
projid))) {
error = SET_ERROR(EDQUOT);
break;
}
arc_buf_t *abuf = NULL;
if (n >= max_blksz && woff >= zp->z_size &&
P2PHASE(woff, max_blksz) == 0 &&
zp->z_blksz == max_blksz) {
/*
* This write covers a full block. "Borrow" a buffer
* from the dmu so that we can fill it before we enter
* a transaction. This avoids the possibility of
* holding up the transaction if the data copy hangs
* up on a pagefault (e.g., from an NFS server mapping).
*/
size_t cbytes;
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
max_blksz);
ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == max_blksz);
if ((error = zfs_uiocopy(abuf->b_data, max_blksz,
UIO_WRITE, uio, &cbytes))) {
dmu_return_arcbuf(abuf);
break;
}
ASSERT3S(cbytes, ==, max_blksz);
}
/*
* Start a transaction.
*/
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
DB_DNODE_ENTER(db);
dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff,
MIN(n, max_blksz));
DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
break;
}
+ /*
+ * NB: We must call zfs_clear_setid_bits_if_necessary before
+ * committing the transaction!
+ */
+
/*
* If rangelock_enter() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
* on the first iteration since rangelock_reduce() will
* shrink down lr_length to the appropriate size.
*/
if (lr->lr_length == UINT64_MAX) {
uint64_t new_blksz;
if (zp->z_blksz > max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
new_blksz = MIN(end_size,
1 << highbit64(zp->z_blksz));
} else {
new_blksz = MIN(end_size, max_blksz);
}
zfs_grow_blocksize(zp, new_blksz, tx);
zfs_rangelock_reduce(lr, woff, n);
}
/*
* XXX - should we really limit each write to z_max_blksz?
* Perhaps we should use SPA_MAXBLOCKSIZE chunks?
*/
const ssize_t nbytes =
MIN(n, max_blksz - P2PHASE(woff, max_blksz));
ssize_t tx_bytes;
if (abuf == NULL) {
tx_bytes = zfs_uio_resid(uio);
zfs_uio_fault_disable(uio, B_TRUE);
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
zfs_uio_fault_disable(uio, B_FALSE);
#ifdef __linux__
if (error == EFAULT) {
+ zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
+ cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
/*
* Account for partial writes before
* continuing the loop.
* Update needs to occur before the next
* zfs_uio_prefaultpages, or prefaultpages may
* error, and we may break the loop early.
*/
if (tx_bytes != zfs_uio_resid(uio))
n -= tx_bytes - zfs_uio_resid(uio);
if (zfs_uio_prefaultpages(MIN(n, max_blksz),
uio)) {
break;
}
continue;
}
#endif
/*
* On FreeBSD, EFAULT should be propagated back to the
* VFS, which will handle faulting and will retry.
*/
if (error != 0 && error != EFAULT) {
+ zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
+ cr, &clear_setid_bits_txg, tx);
dmu_tx_commit(tx);
break;
}
tx_bytes -= zfs_uio_resid(uio);
} else {
/* Implied by abuf != NULL: */
ASSERT3S(n, >=, max_blksz);
ASSERT0(P2PHASE(woff, max_blksz));
/*
* We can simplify nbytes to MIN(n, max_blksz) since
* P2PHASE(woff, max_blksz) is 0, and knowing
* n >= max_blksz lets us simplify further:
*/
ASSERT3S(nbytes, ==, max_blksz);
/*
* Thus, we're writing a full block at a block-aligned
* offset and extending the file past EOF.
*
* dmu_assign_arcbuf_by_dbuf() will directly assign the
* arc buffer to a dbuf.
*/
error = dmu_assign_arcbuf_by_dbuf(
sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
if (error != 0) {
+ /*
+ * XXX This might not be necessary if
+ * dmu_assign_arcbuf_by_dbuf is guaranteed
+ * to be atomic.
+ */
+ zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
+ cr, &clear_setid_bits_txg, tx);
dmu_return_arcbuf(abuf);
dmu_tx_commit(tx);
break;
}
ASSERT3S(nbytes, <=, zfs_uio_resid(uio));
zfs_uioskip(uio, nbytes);
tx_bytes = nbytes;
}
if (tx_bytes && zn_has_cached_data(zp) &&
!(ioflag & O_DIRECT)) {
update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
}
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
break;
}
- /*
- * Clear Set-UID/Set-GID bits on successful write if not
- * privileged and at least one of the execute bits is set.
- *
- * It would be nice to do this after all writes have
- * been done, but that would still expose the ISUID/ISGID
- * to another app after the partial write is committed.
- *
- * Note: we don't call zfs_fuid_map_id() here because
- * user 0 is not an ephemeral uid.
- */
- mutex_enter(&zp->z_acl_lock);
- if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
- (S_IXUSR >> 6))) != 0 &&
- (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
- secpolicy_vnode_setid_retain(zp, cr,
- ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
- uint64_t newmode;
- zp->z_mode &= ~(S_ISUID | S_ISGID);
- newmode = zp->z_mode;
- (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
- (void *)&newmode, sizeof (uint64_t), tx);
- }
- mutex_exit(&zp->z_acl_lock);
+ zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
+ &clear_setid_bits_txg, tx);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
(void) atomic_cas_64(&zp->z_size, end_size,
zfs_uio_offset(uio));
ASSERT(error == 0 || error == EFAULT);
}
/*
* If we are replaying and eof is non zero then force
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
zp->z_size = zfsvfs->z_replay_eof;
error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
if (error1 != 0)
/* Avoid clobbering EFAULT. */
error = error1;
+ /*
+ * NB: During replay, the TX_SETATTR record logged by
+ * zfs_clear_setid_bits_if_necessary must precede any of
+ * the TX_WRITE records logged here.
+ */
zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
NULL, NULL);
+
dmu_tx_commit(tx);
if (error != 0)
break;
ASSERT3S(tx_bytes, ==, nbytes);
n -= nbytes;
if (n > 0) {
if (zfs_uio_prefaultpages(MIN(n, max_blksz), uio)) {
error = SET_ERROR(EFAULT);
break;
}
}
}
zfs_znode_update_vfs(zp);
zfs_rangelock_exit(lr);
/*
* If we're in replay mode, or we made no progress, or the
* uio data is inaccessible return an error. Otherwise, it's
* at least a partial write, so it's successful.
*/
if (zfsvfs->z_replay || zfs_uio_resid(uio) == start_resid ||
error == EFAULT) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (ioflag & (O_SYNC | O_DSYNC) ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
const int64_t nwritten = start_resid - zfs_uio_resid(uio);
dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
task_io_account_write(nwritten);
ZFS_EXIT(zfsvfs);
return (0);
}
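/*
 * Sketch (user space, hypothetical helper) of the chunking policy in the
 * write loop above: each piece is capped at the block size and ends on a
 * block boundary, so every piece can be its own transaction with a small
 * intent-log record.
 */
#include <stddef.h>
#include <stdint.h>

static size_t
next_chunk_len(uint64_t offset, size_t remaining, size_t blksz)
{
    size_t to_boundary = blksz - (size_t)(offset % blksz);

    return (remaining < to_boundary ? remaining : to_boundary);
}

/*
 * Example: with a 128K block size, a 300K write starting at offset 10K
 * is issued as chunks of 118K, 128K and 54K.
 */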
/*ARGSUSED*/
int
zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
int
zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
zilog_t *zilog = zfsvfs->z_log;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
#ifdef ZFS_DEBUG
static int zil_fault_io = 0;
#endif
static void zfs_get_done(zgd_t *zgd, int error);
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zfs_get_data(void *arg, uint64_t gen, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zfsvfs_t *zfsvfs = arg;
objset_t *os = zfsvfs->z_os;
znode_t *zp;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error = 0;
uint64_t zp_gen;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
/*
* Nothing to do if the file has been removed
*/
if (zfs_zget(zfsvfs, object, &zp) != 0)
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
/* check if generation number matches */
if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
sizeof (zp_gen)) != 0) {
zfs_zrele_async(zp);
return (SET_ERROR(EIO));
}
if (zp_gen != gen) {
zfs_zrele_async(zp);
return (SET_ERROR(ENOENT));
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zp;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
}
ASSERT(error == 0 || error == ENOENT);
} else { /* indirect write */
/*
* Have to lock the whole block to ensure when it's
* written out and its checksum is being calculated
* that no one can change the data. We need to re-check
* blocksize after we get the lock in case it's changed!
*/
for (;;) {
uint64_t blkoff;
size = zp->z_blksz;
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
offset, size, RL_READER);
if (zp->z_blksz == size)
break;
offset += blkoff;
zfs_rangelock_exit(zgd->zgd_lr);
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT);
#ifdef ZFS_DEBUG
if (zil_fault_io) {
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
if (error == 0)
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zfs_get_done, zgd);
ASSERT(error || lr->lr_length <= size);
/*
* On success, we need to wait for the write I/O
* initiated by dmu_sync() to complete before we can
* release this dbuf. We will finish everything up
* in the zfs_get_done() callback.
*/
if (error == 0)
return (0);
if (error == EALREADY) {
lr->lr_common.lrc_txtype = TX_WRITE2;
/*
* TX_WRITE2 relies on the data previously
* written by the TX_WRITE that caused
* EALREADY. We zero out the BP because
* it is the old, currently-on-disk BP.
*/
zgd->zgd_bp = NULL;
BP_ZERO(bp);
error = 0;
}
}
}
zfs_get_done(zgd, error);
return (error);
}
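/*
 * Sketch (hypothetical threshold) of the policy described above: small
 * writes are copied into the log record itself ("immediate"), while
 * large writes are synced in place and the record only keeps a pointer
 * to the data ("indirect"), so big payloads are not written twice.
 */
#include <stdint.h>

enum wr_kind { WR_IMMEDIATE, WR_INDIRECT };

static enum wr_kind
choose_write_record(uint64_t write_size, uint64_t immediate_max)
{
    return (write_size <= immediate_max ? WR_IMMEDIATE : WR_INDIRECT);
}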
/* ARGSUSED */
static void
zfs_get_done(zgd_t *zgd, int error)
{
znode_t *zp = zgd->zgd_private;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_zrele_async(zp);
kmem_free(zgd, sizeof (zgd_t));
}
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_holey);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
ZFS_MODULE_PARAM(zfs_vnops, zfs_vnops_, read_chunk_size, ULONG, ZMOD_RW,
"Bytes to read per chunk");
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index 640e805d093a..b9f177daee53 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1,3733 +1,3734 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay them after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
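/*
 * Editor's illustrative sketch (not part of the upstream change),
 * restating the layout described above: the header's zh_log names the
 * first block in the chain, each block embeds a pointer to the next
 * one, and an empty log is simply a header whose zh_log is a hole.
 * zil_parse() below performs the real chain traversal.
 */
static boolean_t __maybe_unused
zil_example_log_is_empty(const zil_header_t *zh)
{
	return (BP_IS_HOLE(&zh->zh_log));
}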
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
int zfs_commit_timeout_pct = 5;
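/*
 * Editor's illustrative sketch (not part of the upstream change): a
 * rough restatement of how zfs_commit_timeout_pct is applied. The
 * authoritative computation lives in zil_commit_waiter(); this helper
 * only approximates it.
 */
static hrtime_t __maybe_unused
zil_example_commit_timeout(const zilog_t *zilog)
{
	int pct = MAX(zfs_commit_timeout_pct, 1);

	/* Scale the last observed lwb latency by the configured percentage. */
	return (zilog->zl_last_lwb_latency * pct / 100);
}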
/*
* See zil.h for more information about these fields.
*/
zil_stats_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
};
static kstat_t *zil_ksp;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
* the disk(s) by the ZIL after an LWB write has completed. Setting this
* will cause ZIL corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zil_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by single active ZIL writer.
*/
unsigned long zil_slog_bulk = 768 * 1024;
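/*
 * Editor's illustrative sketch (not part of the upstream change): the
 * effect of zil_slog_bulk, restating the priority choice made in
 * zil_lwb_write_open() further below.
 */
static zio_priority_t __maybe_unused
zil_example_lwb_priority(const zilog_t *zilog, boolean_t slog)
{
	/* Small sync commits keep sync priority; bulk SLOG traffic is demoted. */
	if (!slog || zilog->zl_cur_used <= zil_slog_bulk)
		return (ZIO_PRIORITY_SYNC_WRITE);
	return (ZIO_PRIORITY_ASYNC_WRITE);
}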
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
zio_cksum_t *zc = &bp->blk_cksum;
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
* Read a log block and make sure it's valid.
*/
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
blkptr_t *nbp, void *dst, char **end)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
&abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t *zilc = abuf->b_data;
char *lr = (char *)(zilc + 1);
uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, len);
*end = (char *)dst + len;
*nbp = zilc->zc_next_blk;
}
} else {
char *lr = abuf->b_data;
uint64_t size = BP_GET_LSIZE(bp);
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(zilc->zc_nused, <=,
SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted, so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk;
char *lrbuf, *lrp;
int error = 0;
bzero(&next_blk, sizeof (blkptr_t));
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
lrbuf, &end);
if (error != 0)
break;
for (lrp = lrbuf; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
if (lr->lrc_seq > claim_lr_seq)
goto done;
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0)
goto done;
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
(max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) ||
(decrypt && error == EIO));
zil_bp_tree_fini(zilog);
zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
return (error);
}
-/* ARGSUSED */
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
+ (void) tx;
ASSERT(!BP_IS_HOLE(bp));
/*
* As we call this function from the context of a rewind to a
* checkpoint, each ZIL block whose txg is later than the txg
* that we rewind to is invalid. Thus, we return -1 so
* zil_parse() doesn't attempt to read it.
*/
if (bp->blk_birth >= first_txg)
return (-1);
if (zil_bp_tree_add(zilog, bp) != 0)
return (0);
zio_free(zilog->zl_spa, first_txg, bp);
return (0);
}
-/* ARGSUSED */
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
+ (void) zilog, (void) lrc, (void) tx, (void) first_txg;
return (0);
}
static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
if (lrc->lrc_txtype != TX_WRITE)
return (0);
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
-/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
+ (void) claim_txg;
+
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
/*
* If we previously claimed it, we need to free it.
*/
if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp))
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (TREE_CMP(v1, v2));
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
boolean_t fastwrite)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
lwb->lwb_blk = *bp;
lwb->lwb_fastwrite = fastwrite;
lwb->lwb_slog = slog;
lwb->lwb_state = LWB_STATE_CLOSED;
lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
lwb->lwb_max_txg = txg;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_tx = NULL;
lwb->lwb_issued_timestamp = 0;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
lwb->lwb_nused = sizeof (zil_chain_t);
lwb->lwb_sz = BP_GET_LSIZE(bp);
} else {
lwb->lwb_nused = 0;
lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
}
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
return (lwb);
}
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
* Called when we create in-memory log transactions so that we know
* to clean up the itxs at the end of spa_sync().
*/
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
boolean_t fastwrite = FALSE;
boolean_t slog = FALSE;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
fastwrite = TRUE;
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
ASSERT(error != 0 || bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
*/
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return;
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa,
&lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
zilog_t *zilog;
uint64_t first_txg;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
* case it cannot have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
first_txg = spa_min_claim_txg(zilog->zl_spa);
/*
* If the spa_log_state is not set to be cleared, check whether
* the current uberblock is a checkpoint one and if the current
* header has been claimed before moving on.
*
* If the current uberblock is a checkpointed uberblock then
* one of the following scenarios took place:
*
* 1] We are currently rewinding to the checkpoint of the pool.
* 2] We crashed in the middle of a checkpoint rewind but we
* did manage to write the checkpointed uberblock to the
* vdev labels, so when we tried to import the pool again
* the checkpointed uberblock was selected from the import
* procedure.
*
* In both cases we want to zero out all the ZIL blocks, except
* the ones that have been claimed at the time of the checkpoint
* (their zh_claim_txg != 0). The reason is that these blocks
* may be corrupted since we may have reused their locations on
* disk after we took the checkpoint.
*
* We could try to set spa_log_state to SPA_LOG_CLEAR earlier
* when we first figure out whether the current uberblock is
* checkpointed or not. Unfortunately, that would discard all
* the logs, including the ones that are claimed, and we would
* leak space.
*/
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
(zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)) {
if (!BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_clear_log_block,
zil_noop_log_record, tx, first_txg, B_FALSE);
}
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* If we are not rewinding and opening the pool normally, then
* the min_claim_txg should be equal to the first txg of the pool.
*/
ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
-/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
+ (void) dp;
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the
* log as its content should have already been synced to the
* pool.
*/
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
/*
* Check whether the current uberblock is checkpointed (e.g.
* we are rewinding) and whether the current header has been
* claimed or not. If it hasn't then skip verifying it. We
* do this because its ZIL blocks may be part of the pool's
* state before the rewind, which is no longer valid.
*/
zil_header_t *zh = zil_header_in_syncing_context(zilog);
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL :
spa_min_claim_txg(os->os_spa), B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done, and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
* zl_lock, thus it must be held when calling this function.
*/
ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3P(lwb, !=, NULL);
ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE);
list_insert_tail(&lwb->lwb_waiters, zcw);
zcw->zcw_lwb = lwb;
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
list_insert_tail(nolwb, zcw);
mutex_exit(&zcw->zcw_lock);
}
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
if (zil_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
avl_tree_t *src = &lwb->lwb_vdev_tree;
avl_tree_t *dst = &nlwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point in its lifetime, 'lwb' no longer needs the protection of
* lwb_vdev_lock for its lwb_vdev_tree (the tree will only be modified
* while holding zilog->zl_lock), as its writes and those of its
* children have all completed. The younger 'nlwb' may be waiting on
* future writes to additional vdevs.
*/
mutex_enter(&nlwb->lwb_vdev_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
*/
while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
avl_index_t where;
if (avl_find(dst, zv, &where) == NULL) {
avl_insert(dst, zv, where);
} else {
kmem_free(zv, sizeof (*zv));
}
}
mutex_exit(&nlwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
* This function is called after all vdevs associated with a given lwb
* write have completed their DKIOCFLUSHWRITECACHE command; or as soon
* as the lwb write completes, if "zil_nocacheflush" is set. Further,
* all "previous" lwb's will have completed before this function is
* called; i.e. this function is called for all previous lwbs before
* it's called for "this" lwb (enforced via zio the dependencies
* configured in zil_lwb_set_zio_dependency()).
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
dmu_tx_t *tx = lwb->lwb_tx;
zil_commit_waiter_t *zcw;
itx_t *itx;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
mutex_enter(&zilog->zl_lock);
/*
* Ensure the lwb buffer pointer is cleared before releasing the
* txg. If we have had an allocation failure and the txg is
* waiting to sync then we want zil_sync() to remove the lwb so
* that it's not picked up as the next new one in
* zil_process_commit_list(). zil_sync() will only remove the
* lwb if lwb_buf is null.
*/
lwb->lwb_buf = NULL;
lwb->lwb_tx = NULL;
ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
lwb->lwb_root_zio = NULL;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
lwb->lwb_state = LWB_STATE_FLUSH_DONE;
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
list_remove(&lwb->lwb_itxs, itx);
zil_itx_destroy(itx);
}
while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
ASSERT(list_link_active(&zcw->zcw_node));
list_remove(&lwb->lwb_waiters, zcw);
ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
* order for this error handling to work correctly. This
* includes ZIO errors from either this LWB's write or
* flush, as well as any errors from other dependent LWBs
* (e.g. a root LWB ZIO that might be a child of this LWB).
*
* With that said, it's important to note that LWB flush
* errors are not propagated up to the LWB root ZIO.
* This is incorrect behavior, and results in VDEV flush
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
mutex_exit(&zilog->zl_lock);
/*
* Now that we've written this log block, we have a stable pointer
* to the next block in the chain, so it's OK to let the txg in
* which we allocated the next block sync.
*/
dmu_tx_commit(tx);
}
/*
* This is called when an lwb's write zio completes. The callback's
* purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
* in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
* in writing out this specific lwb's data, and in the case that cache
* flushes have been deferred, vdevs involved in writing the data for
* previous lwbs. The writes corresponding to all the vdevs in the
* lwb_vdev_tree will have completed by the time this is called, due to
* the zio dependencies configured in zil_lwb_set_zio_dependency(),
* which takes deferred flushes into account. The lwb will be "done"
* once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
* completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
lwb_t *nlwb;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
ASSERT(!BP_IS_GANG(zio->io_bp));
ASSERT(!BP_IS_HOLE(zio->io_bp));
ASSERT(BP_GET_FILL(zio->io_bp) == 0);
abd_free(zio->io_abd);
mutex_enter(&zilog->zl_lock);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
lwb->lwb_state = LWB_STATE_WRITE_DONE;
lwb->lwb_write_zio = NULL;
lwb->lwb_fastwrite = FALSE;
nlwb = list_next(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so, after the lwb block failed to be
* written out.
*
* Additionally, we don't perform any further error handling at
* this point (e.g. setting "zcw_zio_error" appropriately), as
* we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
* we expect any error seen here, to have been propagated to
* that function).
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
/*
* If this lwb does not have any threads waiting for it to
* complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
* command to the vdevs written to by "this" lwb, and instead
* rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
* command for those vdevs. Thus, we merge the vdev tree of
* "this" lwb with the vdev tree of the "next" lwb in the list,
* and assume the "next" lwb will handle flushing the vdevs (or
* deferring the flush(es) again).
*
* This is a useful performance optimization, especially for
* workloads with lots of async write activity and few sync
* write and/or fsync activity, as it has the potential to
* coalesce multiple flush commands to a vdev into one.
*/
if (list_head(&lwb->lwb_waiters) == NULL && nlwb != NULL) {
zil_lwb_flush_defer(lwb, nlwb);
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
if (vd != NULL) {
/*
* The "ZIO_FLAG_DONT_PROPAGATE" is currently
* always used within "zio_flush". This means,
* any errors when flushing the vdev(s), will
* (unfortunately) not be handled correctly,
* since these "zio_flush" errors will not be
* propagated up to "zil_lwb_flush_vdevs_done".
*/
zio_flush(lwb->lwb_root_zio, vd);
}
kmem_free(zv, sizeof (*zv));
}
}
static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zilog->zl_lock));
/*
* The zilog's "zl_last_lwb_opened" field is used to build the
* lwb/zio dependency chain, which is used to preserve the
* ordering of lwb completions that is required by the semantics
* of the ZIL. Each new lwb zio becomes a parent of the
* "previous" lwb zio, such that the new lwb's zio cannot
* complete until the "previous" lwb's zio completes.
*
* This is required by the semantics of zil_commit(); the commit
* waiters attached to the lwbs will be woken in the lwb zio's
* completion callback, so this zio dependency graph ensures the
* waiters are woken in the correct order (the same order the
* lwbs were created).
*/
if (last_lwb_opened != NULL &&
last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);
ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
zio_add_child(lwb->lwb_root_zio,
last_lwb_opened->lwb_root_zio);
/*
* If the previous lwb's write hasn't already completed,
* we also want to order the completion of the lwb write
* zios (above, we only order the completion of the lwb
* root zios). This is required because of how we can
* defer the DKIOCFLUSHWRITECACHE commands for each lwb.
*
* When the DKIOCFLUSHWRITECACHE commands are deferred,
* the previous lwb will rely on this lwb to flush the
* vdevs written to by that previous lwb. Thus, we need
* to ensure this lwb doesn't issue the flush until
* after the previous lwb's write completes. We ensure
* this ordering by setting the zio parent/child
* relationship here.
*
* Without this relationship on the lwb's write zio,
* it's possible for this lwb's write to complete prior
* to the previous lwb's write completing; and thus, the
* vdevs for the previous lwb would be flushed prior to
* that lwb's data being written to those vdevs (the
* vdevs are flushed in the lwb write zio's completion
* handler, zil_lwb_write_done()).
*/
if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
zio_add_child(lwb->lwb_write_zio,
last_lwb_opened->lwb_write_zio);
}
}
}
/*
* This function's purpose is to "open" an lwb such that it is ready to
* accept new itxs being committed to it. To do this, the lwb's zio
* structures are created, and linked to the lwb. This function is
* idempotent; if the passed in lwb has already been opened, this
* function is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
zbookmark_phys_t zb;
zio_priority_t prio;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
mutex_enter(&zilog->zl_lock);
if (lwb->lwb_root_zio == NULL) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
BP_GET_LSIZE(&lwb->lwb_blk));
if (!lwb->lwb_fastwrite) {
metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 1;
}
if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
lwb->lwb_root_zio = zio_root(zilog->zl_spa,
zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
lwb->lwb_state = LWB_STATE_OPENED;
zil_lwb_set_zio_dependency(zilog, lwb);
zilog->zl_last_lwb_opened = lwb;
}
mutex_exit(&zilog->zl_lock);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
struct {
uint64_t limit;
uint64_t blksz;
} zil_block_buckets[] = {
{ 4096, 4096 }, /* non TX_WRITE */
{ 8192 + 4096, 8192 + 4096 }, /* database */
{ 32768 + 4096, 32768 + 4096 }, /* NFS writes */
{ 65536 + 4096, 65536 + 4096 }, /* 64KB writes */
{ 131072, 131072 }, /* < 128KB writes */
{ 131072 + 4096, 65536 + 4096 }, /* 128KB writes */
{ UINT64_MAX, SPA_OLD_MAXBLOCKSIZE}, /* > 128KB writes */
};
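/*
 * Editor's illustrative sketch (not part of the upstream change): how a
 * required size maps onto zil_block_buckets[]. This mirrors the
 * selection loop in zil_lwb_write_issue() below (which additionally
 * caps the result by zl_max_block_size); e.g. a 20KB requirement lands
 * in the 32768 + 4096 bucket.
 */
static uint64_t __maybe_unused
zil_example_bucket_blksz(uint64_t needed)
{
	int i;

	/* Walk the buckets until one is large enough for "needed". */
	for (i = 0; needed > zil_block_buckets[i].limit; i++)
		continue;
	return (zil_block_buckets[i].blksz);
}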
/*
* Maximum block size used by the ZIL. This is picked up when the ZIL is
* initialized; after that, it should not be used directly. See
* zl_max_block_size instead.
*/
int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
/*
* Start a log block write and advance to the next log block.
* Calls are serialized.
*/
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *nlwb = NULL;
zil_chain_t *zilc;
spa_t *spa = zilog->zl_spa;
blkptr_t *bp;
dmu_tx_t *tx;
uint64_t txg;
uint64_t zil_blksz, wsz;
int i, error;
boolean_t slog;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
zilc = (zil_chain_t *)lwb->lwb_buf;
bp = &zilc->zc_next_blk;
} else {
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
bp = &zilc->zc_next_blk;
}
ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
/*
* Allocate the next block and save its address in this block
* before writing it in order to establish the log chain.
* Note that if the allocation of nlwb synced before we wrote
* the block that points at it (lwb), we'd leak it if we crashed.
* Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
* We dirty the dataset to ensure that zil_sync() will be called
* to clean up in the event of allocation failure or I/O failure.
*/
tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
lwb->lwb_tx = tx;
/*
* Log blocks are pre-allocated. Here we select the size of the next
* block, based on size used in the last block.
* - first find the smallest bucket that will fit the block from a
* limited set of block sizes. This is because it's faster to write
* blocks allocated from the same metaslab as they are adjacent or
* close.
* - next find the maximum from the new suggested size and an array of
* previous sizes. This lessens a picket fence effect of wrongly
* guessing the size if we have a stream of say 2k, 64k, 2k, 64k
* requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++)
continue;
zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size);
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
BP_ZERO(bp);
error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
if (slog) {
ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
} else {
ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
}
if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
/*
* Allocate a new log write block (lwb).
*/
nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
}
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
ASSERT3U(wsz, <=, lwb->lwb_sz);
zio_shrink(lwb->lwb_write_zio, wsz);
} else {
wsz = lwb->lwb_sz;
}
zilc->zc_pad = 0;
zilc->zc_nused = lwb->lwb_nused;
zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
/*
* clear unused data for security
*/
bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
lwb->lwb_issued_timestamp = gethrtime();
lwb->lwb_state = LWB_STATE_ISSUED;
zio_nowait(lwb->lwb_root_zio);
zio_nowait(lwb->lwb_write_zio);
/*
* If there was an allocation failure then nlwb will be null which
* forces a txg_wait_synced().
*/
return (nlwb);
}
/*
* Maximum amount of write data that can be put into single log block.
*/
uint64_t
zil_max_log_data(zilog_t *zilog)
{
return (zilog->zl_max_block_size -
sizeof (zil_chain_t) - sizeof (lr_write_t));
}
/*
* Maximum amount of log space we agree to waste in order to reduce the
* number of WR_NEED_COPY chunks and thus the zl_get_data() overhead (~12%).
*/
static inline uint64_t
zil_max_waste_space(zilog_t *zilog)
{
return (zil_max_log_data(zilog) / 8);
}
/*
* Maximum amount of write data for WR_COPIED. For correctness, consumers
* must fall back to WR_NEED_COPY if we can't fit the entire record into one
* maximum sized log block, because each WR_COPIED record must fit in a
* single log block. For space efficiency, we want to fit two records into a
* max-sized log block.
*/
uint64_t
zil_max_copied_data(zilog_t *zilog)
{
return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
sizeof (lr_write_t));
}
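/*
 * Editor's illustrative sketch (not part of the upstream change): a
 * simplified view of how a log-writing caller might use
 * zil_max_copied_data() to choose between WR_COPIED and WR_NEED_COPY.
 * The real policy (in zfs_log_write()) also considers
 * zfs_immediate_write_sz and WR_INDIRECT, which are omitted here.
 */
static itx_wr_state_t __maybe_unused
zil_example_wr_state(zilog_t *zilog, uint64_t resid)
{
	/* Records that fit in one max-sized block can be copied inline. */
	if (resid <= zil_max_copied_data(zilog))
		return (WR_COPIED);
	return (WR_NEED_COPY);
}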
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
lr_t *lrcb, *lrc;
lr_write_t *lrwb, *lrw;
char *lr_buf;
uint64_t dlen, dnow, dpad, lwb_sp, reclen, txg, max_log_data;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
lrc = &itx->itx_lr;
lrw = (lr_write_t *)lrc;
/*
* A commit itx doesn't represent any on-disk state; instead
* it's simply used as a place holder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
if (lrc->lrc_txtype == TX_COMMIT) {
mutex_enter(&zilog->zl_lock);
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
itx->itx_private = NULL;
mutex_exit(&zilog->zl_lock);
return (lwb);
}
if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
dpad = dlen - lrw->lr_length;
} else {
dlen = dpad = 0;
}
reclen = lrc->lrc_reclen;
zilog->zl_cur_used += (reclen + dlen);
txg = lrc->lrc_txg;
ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
max_log_data = zil_max_log_data(zilog);
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < zil_max_waste_space(zilog) &&
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
lwb = zil_lwb_write_issue(zilog, lwb);
if (lwb == NULL)
return (NULL);
zil_lwb_write_open(zilog, lwb);
ASSERT(LWB_EMPTY(lwb));
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
/*
* There must be enough space in the new, empty log block to
* hold reclen. For WR_COPIED, we need to fit the whole
* record in one block, and reclen is the header size + the
* data size. For WR_NEED_COPY, we can create multiple
* records, splitting the data into multiple blocks, so we
* only need to fit one word of data per block; in this case
* reclen is just the header size (no data).
*/
ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
dnow = MIN(dlen, lwb_sp - reclen);
lr_buf = lwb->lwb_buf + lwb->lwb_nused;
bcopy(lrc, lr_buf, reclen);
lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lrc->lrc_txtype == TX_WRITE) {
if (txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zil_itx_copied_count);
ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrcb->lrc_reclen += dnow;
if (lrwb->lr_length > dnow)
lrwb->lr_length = dnow;
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zil_itx_indirect_count);
ZIL_STAT_INCR(zil_itx_indirect_bytes,
lrw->lr_length);
}
/*
* We pass in the "lwb_write_zio" rather than
* "lwb_root_zio" so that the "lwb_write_zio"
* becomes the parent of any zio's created by
* the "zl_get_data" callback. The vdevs are
* flushed after the "lwb_write_zio" completes,
* so we want to make sure that completion
* callback waits for these additional zio's,
* such that the vdevs used by those zio's will
* be included in the lwb's vdev tree, and those
* vdevs will be properly flushed. If we passed
* in "lwb_root_zio" here, then these additional
* vdevs may not be flushed; e.g. if these zio's
* completed after "lwb_write_zio" completed.
*/
error = zilog->zl_get_data(itx->itx_private,
itx->itx_gen, lrwb, dbuf, lwb,
lwb->lwb_write_zio);
if (dbuf != NULL && error == 0 && dnow == dlen)
/* Zero any padding bytes in the last block. */
bzero((char *)dbuf + lrwb->lr_length, dpad);
if (error == EIO) {
txg_wait_synced(zilog->zl_dmu_pool, txg);
return (lwb);
}
if (error != 0) {
ASSERT(error == ENOENT || error == EEXIST ||
error == EALREADY);
return (lwb);
}
}
}
/*
* We're actually making an entry, so update lrc_seq to be the
* log record sequence number. Note that this is generally not
* equal to the itx sequence number because not all transactions
* are synchronous, and sometimes spa_sync() gets there first.
*/
lrcb->lrc_seq = ++zilog->zl_lr_seq;
lwb->lwb_nused += reclen + dnow;
zil_lwb_add_txg(lwb, txg);
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
dlen -= dnow;
if (dlen > 0) {
zilog->zl_cur_used += reclen;
goto cont;
}
return (lwb);
}
itx_t *
zil_itx_create(uint64_t txtype, size_t olrsize)
{
size_t itxsize, lrsize;
itx_t *itx;
lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
bzero((char *)&itx->itx_lr + olrsize, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
void
zil_itx_destroy(itx_t *itx)
{
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(void *arg)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itxs_t *itxs = arg;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
* zil_lwb_commit(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
list_remove(list, itx);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_head(list)) != NULL) {
list_remove(list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (TREE_CMP(o1, o2));
}
/*
* Remove all async itx with the given oid.
*/
void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
ian = avl_find(t, &oid, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_head(&clean_list)) != NULL) {
list_remove(&clean_list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that we
* don't inadvertently clean out in-memory log records that would be required
* by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
* Preferably start a task queue to free up the old itxs but
* if taskq_dispatch can't allocate resources to do that then
* free it in-line. This should be rare. Note, using TQ_SLEEP
* created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
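The dispatch above uses TQ_NOSLEEP precisely so that a resource shortage degrades to in-line cleanup rather than a sleep on the hot path. A small sketch of that "try to dispatch asynchronously, fall back to doing the work in-line" shape (try_dispatch is a hypothetical stand-in for taskq_dispatch with TQ_NOSLEEP; not ZFS code):

#include <stdbool.h>
#include <stdio.h>

/* Pretend cleanup work; in zil_clean() this role is played by zil_itxg_clean(). */
static void
clean_work(void *arg)
{
    printf("cleaning batch %d\n", *(int *)arg);
}

/* Non-blocking dispatch that may fail instead of sleeping for resources. */
static bool
try_dispatch(void (*fn)(void *), void *arg)
{
    (void) fn;
    (void) arg;
    return (false);    /* simulate an allocation failure */
}

int
main(void)
{
    int batch = 7;

    if (!try_dispatch(clean_work, &batch))
        clean_work(&batch);    /* fall back to in-line cleanup */
    return (0);
}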
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static void
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the zil better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit it to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
mutex_exit(&itxg->itxg_lock);
}
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
ian = avl_find(t, &foid, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL ||
last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
itx->itx_private = NULL;
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
* Since zio_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread was to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
zil_process_commit_list(zilog_t *zilog)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb;
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_head(&zilog->zl_itx_commit_list) == NULL)
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
}
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
list_remove(&zilog->zl_itx_commit_list, itx);
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itx's
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list, it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
* itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
* To prevent the above scenario from occurring, and to ensure
* that once an lwb is OPENED it will transition to ISSUED
* and eventually DONE, we always commit TX_COMMIT itx's to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
lwb = zil_lwb_commit(zilog, itx, lwb);
if (lwb == NULL)
list_insert_tail(&nolwb_itxs, itx);
else
list_insert_tail(&lwb->lwb_itxs, itx);
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() like
* normal.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_head(&nolwb_waiters)) != NULL) {
zil_commit_waiter_skip(zcw);
list_remove(&nolwb_waiters, zcw);
}
/*
* And finally, we have to destroy the itx's that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_head(&nolwb_itxs)) != NULL) {
list_remove(&nolwb_itxs, itx);
zil_itx_destroy(itx);
}
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point, the ZIL block pointed at by the "lwb"
* variable is in one of the following states: "closed"
* or "open".
*
* If it's "closed", then no itxs have been committed to
* it, so there's no point in issuing its zio (i.e. it's
* "empty").
*
* If it's "open", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
* 1. Ideally, there will be more ZIL activity occurring
* on the system, such that this function will be
* immediately called again (not necessarily by the same
* thread) and this lwb's zio will be issued via
* zil_lwb_commit(). This way, the lwb is guaranteed to
* be "full" when it is issued to disk, and we'll make
* the best use of the lwb's size that we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
* the system, such that this lwb's zio isn't issued via
* zil_lwb_commit(), zil_commit_waiter() will issue the
* lwb's zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, and means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*/
}
}
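The long conditional in the loop above condenses to a single predicate: an itx must be committed to an lwb if the pool is frozen, its txg has not yet synced, or it is a TX_COMMIT record; otherwise spa_sync has already persisted the data and the itx can simply be destroyed. A hedged userland condensation of that decision (must_commit is my own name, not a ZFS function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
must_commit(bool frozen, uint64_t itx_txg, uint64_t last_synced_txg,
    bool is_tx_commit)
{
    bool synced = itx_txg <= last_synced_txg;

    return (frozen || !synced || is_tx_commit);
}

int
main(void)
{
    printf("%d\n", must_commit(false, 10, 9, false));  /* 1: txg not synced */
    printf("%d\n", must_commit(false, 10, 10, false)); /* 0: already synced */
    printf("%d\n", must_commit(false, 10, 10, true));  /* 1: TX_COMMIT always */
    return (0);
}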
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is that the passed in waiter's
* commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static void
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce the CPU time spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zil_commit_writer_count);
zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog);
out:
mutex_exit(&zilog->zl_issuer_lock);
}
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
return;
/*
* In order to call zil_lwb_write_issue() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
if (zcw->zcw_done)
goto out;
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
* from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
* _can_ transition from ISSUED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
* the ISSUED or DONE states.
*
* The important thing is that we treat the lwb differently depending on
* if it's ISSUED or OPENED, and block any other threads that might
* attempt to issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
* zil_lwb_write_issue() if the lwb had already been issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
goto out;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* Having to issue the lwb's zio here means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
* When zil_lwb_write_issue() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
*
* We must drop the commit waiter's lock prior to
* calling zil_commit_writer_stall() or else we can wind
* up with the following deadlock:
*
* - This thread is waiting for the txg to sync while
* holding the waiter's lock; txg_wait_synced() is
* used within zil_commit_writer_stall().
*
* - The txg can't sync because it is waiting for this
* lwb's zio callback to call dmu_tx_commit().
*
* - The lwb's zio callback can't call dmu_tx_commit()
* because it's blocked trying to acquire the waiter's
* lock, which occurs prior to calling dmu_tx_commit()
*/
mutex_exit(&zcw->zcw_lock);
zil_commit_writer_stall(zilog);
mutex_enter(&zcw->zcw_lock);
}
out:
mutex_exit(&zilog->zl_issuer_lock);
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
* the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
* function. If this does not occur, this secondary responsibility will
* ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where it's "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so it's "zcw_done" field is still B_FALSE.
*/
IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
int rc = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
if (rc != -1 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
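The timeout computed at the top of the function is simply a percentage of the last observed lwb write latency, floored at 1%, converted to an absolute wakeup time for the timed condvar wait. A sketch of just that arithmetic with hypothetical input values (the kernel gets them from gethrtime() and zilog->zl_last_lwb_latency; this is not ZFS code):

#include <stdint.h>
#include <stdio.h>

#define PCT_MAX(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
    int64_t now_ns = 1000000000LL;          /* pretend "current time" */
    int64_t last_lwb_latency_ns = 50000;    /* ~50 usec device write */
    int commit_timeout_pct = 5;             /* e.g. zfs_commit_timeout_pct */
    int pct = PCT_MAX(commit_timeout_pct, 1);

    int64_t sleep_ns = (last_lwb_latency_ns * pct) / 100;
    int64_t wakeup_ns = now_ns + sleep_ns;  /* absolute deadline */

    printf("sleep %lld ns, absolute wakeup at %lld ns\n",
        (long long)sleep_ns, (long long)wakeup_ns);
    return (0);
}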
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once an lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called, are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
* simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(); such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit() a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, any
* lwb cannot be considered committed to stable storage, until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified, thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there are
* checks in the code that enforce this invariant, and they will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_writer(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying the itx's
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done", and signalled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fall back to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but the expectation is for this to be
* an exceptional case, and shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
}
zil_free_commit_waiter(zcw);
}
/*
* Called in syncing context to free committed log blocks and update log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
bzero(zh, sizeof (zil_header_t));
bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of a log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
}
}
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_head(&zilog->zl_lwb_list) == NULL)
BP_ZERO(&zh->zh_log);
}
/*
* Remove fastwrite on any blocks that have been pre-allocated for
* the next commit. This prevents fastwrite counter pollution by
* unused, long-lived LWBs.
*/
for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) {
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 0;
}
}
mutex_exit(&zilog->zl_lock);
}
-/* ARGSUSED */
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
+ (void) unused, (void) kmflag;
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
-/* ARGSUSED */
static void
zil_lwb_dest(void *vbuf, void *unused)
{
+ (void) unused;
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_ksp = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_ksp != NULL) {
zil_ksp->ks_data = &zil_stats;
kstat_install(zil_ksp);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_ksp != NULL) {
kstat_delete(zil_ksp);
zil_ksp = NULL;
}
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
zilog->zl_max_block_size = zil_maxblocksize;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
cv_destroy(&zilog->zl_cv_suspend);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL)
txg = zilog->zl_dirty_max_txg;
else
txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
mutex_exit(&zilog->zl_lock);
/*
* We need to use txg_wait_synced() to wait long enough for the
* ZIL to be clean, and to wait for all pending lwbs to be
* written out.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
(u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* if cookiep == NULL, this does both the suspend & resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted &&
dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EACCES));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
* LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted)
dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
typedef struct zil_replay_arg {
zil_replay_func_t **zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
bcopy(lr, zr->zr_lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error we try syncing out
* any removes then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
-/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
+ (void) bp, (void) arg, (void) claim_txg;
+
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
*/
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
zil_destroy(zilog, B_TRUE);
return;
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
-/* ARGSUSED */
int
zil_reset(const char *osname, void *arg)
{
- int error;
+ (void) arg;
- error = zil_suspend(osname, NULL);
+ int error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW,
"ZIL block open timeout percentage");
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
"Disable intent logging replay");
ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
"Disable ZIL cache flushes");
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
"Limit in bytes slog sync writes per commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW,
"Limit in bytes of ZIL log block size");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index c016fa323b41..04b5d121e111 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -1,5054 +1,5064 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, Datto, Inc.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *zio_type_name[ZIO_TYPES] = {
/*
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream open zfs.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
int zio_deadman_log_all = B_FALSE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
/* Mark IOs as "slow" if they take longer than 30 seconds */
int zio_slow_io_ms = (30 * MILLISEC);
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*
* Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
* compression (including of metadata). In practice, we don't have this
* many sync passes, so this has no effect.
*
* The original intent was that disabling compression would help the sync
* passes to converge. However, in practice disabling compression increases
* the average number of sync passes, because when we turn compression off, a
* lot of blocks' sizes will change and thus we have to re-allocate (not
* overwrite) them. It also increases the number of 128KB allocations (e.g.
* for indirect blocks and spacemaps) because these will not be compressed.
* The 128K allocations are especially detrimental to performance on highly
* fragmented systems, which may have very few free segments of this size,
* and may need to load new metaslabs to satisfy 128K allocations.
*/
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 8; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
/*
* Enable smaller cores by excluding metadata
* allocations as well.
*/
int zio_exclude_metadata = 0;
int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
void
zio_init(void)
{
size_t c;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE. For larger buffers, we want a cache
* for each quarter-power of 2.
*/
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t p2 = size;
size_t align = 0;
size_t data_cflags, cflags;
data_cflags = KMC_NODEBUG;
cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
KMC_NODEBUG : 0;
#if defined(_ILP32) && defined(_KERNEL)
/*
* Cache size limited to 1M on 32-bit platforms until ARC
* buffers no longer require virtual address space.
*/
if (size > zfs_max_recordsize)
break;
#endif
while (!ISP2(p2))
p2 &= p2 - 1;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
/*
* Here's the problem - on 4K native devices in userland on
* Linux using O_DIRECT, buffers must be 4K aligned or I/O
* will fail with EINVAL, causing zdb (and others) to coredump.
* Since userland probably doesn't need optimized buffer caches,
* we just force 4K alignment on everything.
*/
align = 8 * SPA_MINBLOCKSIZE;
#else
if (size < PAGESIZE) {
align = SPA_MINBLOCKSIZE;
} else if (IS_P2ALIGNED(size, p2 >> 2)) {
align = PAGESIZE;
}
#endif
if (align != 0) {
char name[36];
if (cflags == data_cflags) {
/*
* Resulting kmem caches would be identical.
* Save memory by creating only one.
*/
(void) snprintf(name, sizeof (name),
"zio_buf_comb_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name,
size, align, NULL, NULL, NULL, NULL, NULL,
cflags);
zio_data_buf_cache[c] = zio_buf_cache[c];
continue;
}
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, data_cflags);
}
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
void
zio_fini(void)
{
size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
for (size_t i = 0; i < n; i++) {
if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((i + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[i],
(long long unsigned)zio_buf_cache_frees[i]);
}
#endif
/*
* The same kmem cache can show up multiple times in both zio_buf_cache
* and zio_data_buf_cache. Do a wasteful but trivially correct scan to
* sort it out.
*/
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_buf_cache[j])
zio_buf_cache[j] = NULL;
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_data_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
VERIFY3P(zio_buf_cache[i], ==, NULL);
VERIFY3P(zio_data_buf_cache[i], ==, NULL);
}
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists so that we will limit the amount
* of ZFS data that shows up in a kernel crashdump. (This reduces the amount
* of kernel heap dumped to disk when the kernel panics.)
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
kmem_cache_free(zio_data_buf_cache[c], buf);
}
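/*
 * Editor's sketch (illustrative only, not part of zio.c): the allocator entry
 * points above all map a byte count to a cache index with
 * (size - 1) >> SPA_MINBLOCKSHIFT, i.e. every request is rounded up to the
 * next multiple of SPA_MINBLOCKSIZE. The standalone program below shows that
 * mapping; it assumes SPA_MINBLOCKSHIFT == 9 (512-byte minimum block) and a
 * 16 MiB SPA_MAXBLOCKSIZE, which are assumptions made for the demo.
 */
#include <stdio.h>
#include <stddef.h>

#define	MINBLOCKSHIFT	9		/* assumed SPA_MINBLOCKSHIFT */
#define	MAXBLOCKSIZE	(16ULL << 20)	/* assumed SPA_MAXBLOCKSIZE */

/* Same index computation as zio_buf_alloc()/zio_buf_free(). */
static size_t
bufcache_index(size_t size)
{
	return ((size - 1) >> MINBLOCKSHIFT);
}

int
main(void)
{
	size_t sizes[] = { 1, 512, 513, 4096, 131072 };

	for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
		size_t c = bufcache_index(sizes[i]);
		printf("size %7zu -> cache index %5zu (bucket %llu bytes)\n",
		    sizes[i], c,
		    (unsigned long long)((c + 1) << MINBLOCKSHIFT));
	}
	printf("cache array length: %llu\n",
	    (unsigned long long)(MAXBLOCKSIZE >> MINBLOCKSHIFT));
	return (0);
}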
static void
zio_abd_free(void *abd, size_t size)
{
+ (void) size;
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
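/*
 * Editor's sketch (illustrative only, not part of zio.c): the transform stack
 * above is a simple LIFO of saved (buffer, size) pairs plus an optional undo
 * callback, so transforms applied on the way down are undone in reverse order
 * on the way back up. Below is a minimal userland version of that pattern
 * with invented type and field names.
 */
#include <stdio.h>
#include <stdlib.h>

typedef void xform_undo_fn(void *orig_buf, size_t orig_size);

typedef struct xform {
	void		*x_orig_buf;
	size_t		x_orig_size;
	xform_undo_fn	*x_undo;
	struct xform	*x_next;
} xform_t;

typedef struct stream {
	void	*s_buf;
	size_t	s_size;
	xform_t	*s_stack;
} stream_t;

static void
push_transform(stream_t *s, void *newbuf, size_t newsize, xform_undo_fn *undo)
{
	xform_t *x = malloc(sizeof (*x));

	x->x_orig_buf = s->s_buf;	/* remember what to restore */
	x->x_orig_size = s->s_size;
	x->x_undo = undo;
	x->x_next = s->s_stack;		/* push */
	s->s_stack = x;
	s->s_buf = newbuf;
	s->s_size = newsize;
}

static void
pop_transforms(stream_t *s)
{
	xform_t *x;

	while ((x = s->s_stack) != NULL) {
		if (x->x_undo != NULL)
			x->x_undo(x->x_orig_buf, x->x_orig_size);
		s->s_buf = x->x_orig_buf;	/* restore */
		s->s_size = x->x_orig_size;
		s->s_stack = x->x_next;		/* pop */
		free(x);
	}
}

static void
print_undo(void *buf, size_t size)
{
	printf("undo back to %p (%zu bytes)\n", buf, size);
}

int
main(void)
{
	char big[512], small[128];
	stream_t s = { big, sizeof (big), NULL };

	push_transform(&s, small, sizeof (small), print_undo);
	pop_transforms(&s);	/* restores big/512 via print_undo */
	return (0);
}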
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
void *tmp = abd_borrow_buf(data, size);
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, tmp, zio->io_size, size,
&zio->io_prop.zp_complevel);
abd_return_buf_copy(data, tmp, size);
if (zio_injection_enabled && ret == 0)
ret = zio_handle_fault_injection(zio, EINVAL);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
tmp = zio_buf_alloc(lsize);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, tmp, zio->io_size, lsize,
&zio->io_prop.zp_complevel);
if (ret != 0) {
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
zio_buf_free(tmp, lsize);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but for the moment
* enum zio_flag is out of bits.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
if (zio_injection_enabled && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
zio->io_abd, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, &zio->io_bookmark);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
void
zio_add_child(zio_t *pio, zio_t *cio)
{
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
pio->io_child_count++;
cio->io_parent_count++;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
pio->io_child_count--;
cio->io_parent_count--;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* If we can tell the caller to execute this parent next, do
* so. Otherwise dispatch the parent zio as its own task.
*
* Having the caller execute the parent when possible reduces
* locking on the zio taskq's, reduces context switch
* overhead, and has no recursion penalty. Note that one
* read from disk typically causes at least 3 zio's: a
* zio_null(), the logical zio_read(), and then a physical
* zio. When the physical ZIO completes, we are able to call
* zio_done() on all 3 of these zio's from one invocation of
* zio_execute() by returning the parent back to
* zio_execute(). Since the parent isn't executed until this
* thread returns back to zio_execute(), the caller should do
* so promptly.
*
* In other cases, dispatching the parent prevents
* overflowing the stack when we have deeply nested
* parent-child relationships, as we do with the "mega zio"
* of writes for spa_sync(), and the chain of ZIL blocks.
*/
if (next_to_executep != NULL && *next_to_executep == NULL) {
*next_to_executep = pio;
} else {
zio_taskq_dispatch(pio, type, B_FALSE);
}
} else {
mutex_exit(&pio->io_lock);
}
}
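/*
 * Editor's sketch (illustrative only, not part of zio.c): the comment above
 * describes handing the parent back to the caller instead of recursing into
 * it or dispatching a task. The standalone program below shows the shape of
 * that control flow -- a stage function returns the next item to run (or
 * NULL), and a flat driver loop keeps going, much as __zio_execute() does
 * further down. The work_t type and names are invented for the sketch.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct work {
	const char	*w_name;
	struct work	*w_parent;	/* run this next once we finish */
} work_t;

/*
 * "Complete" one item and hand its parent back to the caller; returning NULL
 * stops the loop. No recursion, so the stack stays flat however deep the
 * parent chain is.
 */
static work_t *
run_one(work_t *w)
{
	printf("completed %s\n", w->w_name);
	return (w->w_parent);
}

static void
execute(work_t *w)
{
	while (w != NULL)
		w = run_one(w);
}

int
main(void)
{
	work_t root = { "root (zio_null analogue)", NULL };
	work_t logical = { "logical read", &root };
	work_t physical = { "physical read", &logical };

	execute(&physical);	/* prints physical, logical, root in order */
	return (0);
}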
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
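/*
 * Editor's sketch (illustrative only, not part of zio.c):
 * zio_bookmark_compare() is a field-by-field lexicographic comparison with
 * the object addresses as a final tiebreaker, so two distinct zios never
 * compare equal -- a property ordered containers such as AVL trees rely on.
 * The standalone program below demonstrates the same pattern on a simplified,
 * invented struct.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct item {
	uint64_t	i_objset;
	uint64_t	i_object;
} item_t;

/* Lexicographic compare, then break ties on the element address. */
static int
item_compare(const void *x1, const void *x2)
{
	const item_t *a = x1;
	const item_t *b = x2;

	if (a->i_objset != b->i_objset)
		return (a->i_objset < b->i_objset ? -1 : 1);
	if (a->i_object != b->i_object)
		return (a->i_object < b->i_object ? -1 : 1);
	if (a != b)
		return (a < b ? -1 : 1);	/* distinct but equal keys */
	return (0);
}

int
main(void)
{
	item_t a = { 1, 9 }, b = { 2, 3 }, c = { 1, 9 };

	printf("a vs b: %d\n", item_compare(&a, &b));	/* -1: objset 1 < 2 */
	printf("b vs a: %d\n", item_compare(&b, &a));	/*  1 */
	printf("a vs c: %d\n", item_compare(&a, &c));	/* nonzero: tiebreak */
	printf("a vs a: %d\n", item_compare(&a, &a));	/*  0 */
	return (0);
}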
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
enum zio_flag flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
bzero(zio, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
zio->io_bp = (blkptr_t *)bp;
zio->io_bp_copy = *bp;
zio->io_bp_orig = *bp;
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT)
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
static void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, enum zio_flag flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
return (zio_null(NULL, spa, NULL, done, private, flags));
}
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
enum blk_verify_flag blk_verify, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
switch (blk_verify) {
case BLK_VERIFY_HALT:
dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
zfs_panic_recover("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_LOG:
zfs_dbgmsg("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_ONLY:
break;
}
return (1);
}
/*
* Verify the block pointer fields contain reasonable values. This means
* it only contains known object types, checksum/compression identifiers,
* block sizes within the maximum allowed limits, valid DVAs, etc.
*
* If everything checks out, B_TRUE is returned. The blk_verify argument
* controls the behavior when an invalid field is detected.
*
* Modes for zfs_blkptr_verify:
* 1) BLK_VERIFY_ONLY (evaluate the block)
* 2) BLK_VERIFY_LOG (evaluate the block and log problems)
* 3) BLK_VERIFY_HALT (call zfs_panic_recover on error)
*/
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
enum blk_verify_flag blk_verify)
{
int errors = 0;
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Do not verify individual DVAs if the config is not trusted. This
* will be done once the zio is executed in vdev_mirror_map_alloc.
*/
if (!spa->spa_trust_config)
return (errors == 0);
if (!config_held)
spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
else
ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
const dva_t *dva = &bp->blk_dva[i];
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_hole_ops) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has hole VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has invalid OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
if (errors > 0)
dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
if (!config_held)
spa_config_exit(spa, SCL_VDEV, bp);
return (errors == 0);
}
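/*
 * Editor's sketch (illustrative only, not part of zio.c): zfs_blkptr_verify()
 * accumulates an error count and lets the caller pick how violations are
 * handled (evaluate only, log, or halt). The standalone validator below
 * replays that pattern for a made-up record type; the enum, field names, and
 * limits are invented for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>

typedef enum { VERIFY_ONLY, VERIFY_LOG, VERIFY_HALT } verify_mode_t;

typedef struct record {
	unsigned	r_type;		/* must be < 8 */
	unsigned	r_size;		/* must be <= 4096 */
} record_t;

static int
complain(verify_mode_t mode, const char *msg)
{
	switch (mode) {
	case VERIFY_HALT:
		fprintf(stderr, "fatal: %s\n", msg);
		abort();
	case VERIFY_LOG:
		fprintf(stderr, "warning: %s\n", msg);
		break;
	case VERIFY_ONLY:
		break;
	}
	return (1);	/* one more error, like zfs_blkptr_verify_log() */
}

static int
record_verify(const record_t *r, verify_mode_t mode)
{
	int errors = 0;

	if (r->r_type >= 8)
		errors += complain(mode, "invalid type");
	if (r->r_size > 4096)
		errors += complain(mode, "invalid size");
	return (errors == 0);	/* B_TRUE analogue */
}

int
main(void)
{
	record_t bad = { 9, 8192 };

	printf("verify(log) -> %d\n", record_verify(&bad, VERIFY_LOG));
	return (0);
}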
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
+ (void) bp;
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
return (B_FALSE);
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL)
return (B_FALSE);
if (vd->vdev_ops == &vdev_hole_ops)
return (B_FALSE);
if (vd->vdev_ops == &vdev_missing_ops) {
return (B_FALSE);
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize)
return (B_FALSE);
return (B_TRUE);
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *private, zio_priority_t priority, enum zio_flag flags,
const zbookmark_phys_t *zb)
{
zio_t *zio;
ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa));
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_physdone = physdone;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync(), keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
(void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT);
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
metaslab_check_free(spa, bp);
/*
* Frees that are for the currently-syncing txg, are not going to be
* deferred, and which will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*
* Note that we only defer frees after zfs_sync_pass_deferred_free
* when the log space map feature is disabled. [see relevant comment
* in spa_sync_iterate_to_convergence()]
*/
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
}
}
/*
* To improve performance, this function may return NULL if we were able
* to do the free immediately. This avoids the cost of creating a zio
* (and linking it to the parent, etc).
*/
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
enum zio_flag flags)
{
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
if (BP_IS_EMBEDDED(bp))
return (NULL);
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) {
/*
* GANG and DEDUP blocks can induce a read (for the gang block
* header, or the DDT), so issue them asynchronously so that
* this thread is not tied up.
*/
enum zio_stage stage =
ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL,
ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
} else {
metaslab_free(spa, bp, txg, B_FALSE);
return (NULL);
}
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, enum zio_flag flags)
{
zio_t *zio;
(void) zfs_blkptr_verify(spa, bp, flags & ZIO_FLAG_CONFIG_WRITER,
BLK_VERIFY_HALT);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
spa_min_claim_txg(spa));
ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, enum zio_flag flags)
{
zio_t *zio;
int c;
if (vd->vdev_children == 0) {
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
zio->io_cmd = cmd;
} else {
zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
for (c = 0; c < vd->vdev_children; c++)
zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
done, private, flags));
}
return (zio);
}
zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *private, zio_priority_t priority,
enum zio_flag flags, enum trim_flag trim_flags)
{
zio_t *zio;
ASSERT0(vd->vdev_children);
ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
ASSERT3U(size, !=, 0);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
zio->io_trim_flags = trim_flags;
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
enum zio_flag flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*
* vdev child I/Os do not propagate their error to the parent.
* Therefore, for correct operation the caller *must* check for
* and handle the error in the child i/o's done callback.
* The only exceptions are i/os that we don't care about
* (OPTIONAL or REPAIR).
*/
ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
done != NULL);
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
if (vd->vdev_ops->vdev_op_leaf) {
ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
}
flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERT(pio->io_metaslab_class != NULL);
ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
zio->io_physdone = pio->io_physdone;
if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
zio->io_logical->io_phys_children++;
return (zio);
}
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
zio_type_t type, zio_priority_t priority, enum zio_flag flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
void
zio_flush(zio_t *zio, vdev_t *vd)
{
zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
}
if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (zio);
}
static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(bp->blk_birth != zio->io_txg);
ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (BP_IS_EMBEDDED(bp))
return (zio);
/*
* If we've been overridden and nopwrite is set then
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (zio);
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (zio);
}
static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
int pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
return (zio);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
spa_max_replication(spa)) == BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
void *cbuf = zio_buf_alloc(lsize);
psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize,
zp->zp_complevel);
if (psize == 0 || psize >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
zio_buf_free(cbuf, lsize);
bp->blk_birth = zio->io_txg;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (zio);
} else {
/*
* Round compressed size up to the minimum allocation
* size of the smallest-ashift device, and zero the
* tail. This ensures that the compressed size of the
* BP (and thus the compressratio property) is correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT);
size_t rounded = (size_t)roundup(psize,
spa->spa_min_alloc);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
psize = lsize;
} else {
abd_t *cdata = abd_get_from_buf(cbuf, lsize);
abd_take_ownership_of_buf(cdata, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
zio->io_abd, NULL, lsize, zp->zp_complevel);
if (psize == 0 || psize >= lsize)
compress = ZIO_COMPRESS_OFF;
- } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
+ } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS &&
+ !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) {
+ /*
+ * If we are raw receiving an encrypted dataset we should not
+ * take this codepath because it will change the on-disk block
+ * and decryption will fail.
+ */
size_t rounded = MIN((size_t)roundup(psize,
spa->spa_min_alloc), lsize);
if (rounded != psize) {
abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, rounded, NULL);
}
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (zio->io_bp_orig.blk_birth != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (zio);
}
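/*
 * Editor's sketch (illustrative only, not part of zio.c): zio_write_compress()
 * rounds the compressed size up to the pool's smallest allocation size and
 * zeroes the tail, so the stored psize (and therefore the compressratio
 * property) accounts for the padding. The standalone program below shows the
 * arithmetic; it assumes a 4 KiB minimum allocation, whereas the real value
 * (spa_min_alloc) depends on the vdevs' ashift.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	MIN_ALLOC	4096	/* assumed smallest-ashift allocation size */

/* Round up to the next multiple of align; align is a power of two here. */
static size_t
roundup_pow2(size_t x, size_t align)
{
	return ((x + align - 1) & ~(align - 1));
}

int
main(void)
{
	size_t lsize = 128 * 1024;	/* logical block size */
	size_t psize = 9000;		/* size reported by the compressor */
	size_t rounded = roundup_pow2(psize, MIN_ALLOC);

	if (rounded >= lsize) {
		/* Padding ate the savings; the block would stay uncompressed. */
		printf("compression not worthwhile, psize = lsize = %zu\n",
		    lsize);
		return (0);
	}

	unsigned char *buf = calloc(1, rounded);
	if (buf == NULL)
		return (1);
	/* ...compressed bytes would occupy buf[0 .. psize - 1]... */
	memset(buf + psize, 0, rounded - psize);	/* zero the tail */
	printf("lsize %zu, compressed %zu, stored psize %zu\n",
	    lsize, psize, rounded);
	free(buf);
	return (0);
}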
static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
return (zio);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available.
*/
if ((zio->io_priority == ZIO_PRIORITY_NOW ||
zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
&zio->io_tqent);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
spa_t *spa = zio->io_spa;
taskq_t *tq = taskq_of_curthread();
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (tqs->stqs_taskq[i] == tq)
return (B_TRUE);
}
}
return (B_FALSE);
}
static zio_t *
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
void
zio_interrupt(void *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this IO, so jump to the end of this function and "skip" the
* delay, issuing the IO directly to the zio layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
clock_t expire_at_tick = ddi_get_lbolt() +
NSEC_TO_TICK(diff);
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
if (NSEC_TO_TICK(diff) == 0) {
/* Our delay is less than a jiffy - just spin */
zfs_sleep_until(zio->io_target_timestamp);
zio_interrupt(zio);
} else {
/*
* Use taskq_dispatch_delay() in place of
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
zio_interrupt, zio, TQ_NOSLEEP,
expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just
* finish the zio without a delay.
*/
zio_interrupt(zio);
}
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s "
"last=%llu type=%d "
"priority=%d flags=0x%x stage=0x%x "
"pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu "
"level=%llu blkid=%llu "
"offset=%llu size=%llu "
"error=%d",
ziodepth, pio, pio->io_timestamp,
(u_longlong_t)delta, pio->io_delta, pio->io_delay,
vd ? vd->vdev_path : "NULL",
vq ? vq->vq_io_complete_ts : 0, pio->io_type,
pio->io_priority, pio->io_flags, pio->io_stage,
pio->io_pipeline, pio->io_pipeline_trace,
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
(u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio, ziodepth + 1);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio, 0);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(void *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine if in the current context the stack is sized large
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
+#else
+ (void) zio;
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
/*
* The zio pipeline stage returns the next zio to execute
* (typically the same as this one), or NULL if we should
* stop.
*/
zio = zio_pipeline[highbit64(stage) - 1](zio);
if (zio == NULL)
return;
}
}
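/*
 * Editor's sketch (illustrative only, not part of zio.c): __zio_execute()
 * shifts io_stage left until it lands on the next bit that is set in
 * io_pipeline, indexes a handler table by the bit position, and stops when a
 * handler returns NULL. The standalone program below reproduces that
 * bitmask-driven dispatch with invented stage names and a trivial handler.
 */
#include <stdio.h>
#include <stdint.h>

enum stage {
	STAGE_OPEN	= 1 << 0,
	STAGE_COMPRESS	= 1 << 1,
	STAGE_CHECKSUM	= 1 << 2,
	STAGE_VDEV_IO	= 1 << 3,
	STAGE_DONE	= 1 << 4,
};

typedef struct job {
	uint32_t	j_stage;	/* last stage executed */
	uint32_t	j_pipeline;	/* bitmask of stages to run */
} job_t;

typedef job_t *stage_fn(job_t *);

static int
highbit32(uint32_t x)
{
	int h = 0;

	while (x != 0) {
		h++;
		x >>= 1;
	}
	return (h);
}

static job_t *
stage_generic(job_t *j)
{
	printf("running stage bit 0x%x\n", (unsigned)j->j_stage);
	return (j);	/* keep going; NULL would mean "stalled/handed off" */
}

static stage_fn *pipeline_table[] = {
	stage_generic, stage_generic, stage_generic, stage_generic,
	stage_generic,
};

static void
execute(job_t *j)
{
	while (j->j_stage < STAGE_DONE) {
		uint32_t stage = j->j_stage;

		do {
			stage <<= 1;	/* next candidate stage */
		} while ((stage & j->j_pipeline) == 0);
		j->j_stage = stage;
		j = pipeline_table[highbit32(stage) - 1](j);
		if (j == NULL)
			return;
	}
}

int
main(void)
{
	/* A pipeline that skips the compress stage, as a read might. */
	job_t j = { STAGE_OPEN, STAGE_CHECKSUM | STAGE_VDEV_IO | STAGE_DONE };

	execute(&j);
	return (0);
}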
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
/*
* Some routines, like zio_free_sync(), may return a NULL zio
* to avoid the performance overhead of creating and then destroying
* an unneeded zio. For the callers' simplicity, we accept a NULL
* zio and ignore it.
*/
if (zio == NULL)
return (0);
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
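/*
 * Editor's sketch (illustrative only, not part of zio.c): zio_wait() blocks
 * on io_cv with a timed wait and, once the zio has been outstanding longer
 * than the deadman threshold, logs via zio_deadman() and keeps waiting on a
 * shorter interval. Below is a userland pthreads version of that "timed wait
 * plus periodic complaint" pattern; all timing values and names are
 * placeholders invented for the sketch. Build with -lpthread.
 */
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int done;

static void *
worker(void *arg)
{
	(void) arg;
	sleep(3);			/* pretend the I/O takes 3 seconds */
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	time_t start = time(NULL);

	pthread_create(&tid, NULL, worker, NULL);
	pthread_mutex_lock(&lock);
	while (!done) {
		struct timespec deadline;

		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 1;	/* check interval (placeholder) */
		int rc = pthread_cond_timedwait(&cv, &lock, &deadline);
		if (rc != 0 && time(NULL) - start >= 2) {
			/* the zio_deadman() analogue: log and keep waiting */
			printf("still waiting after %ld seconds\n",
			    (long)(time(NULL) - start));
		}
	}
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return (0);
}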
void
zio_nowait(zio_t *zio)
{
/*
* See comment in zio_wait().
*/
if (zio == NULL)
return;
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
zio_unique_parent(zio) == NULL) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_zio_root "Godfather" I/O, which
* will ensure it completes prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
}
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(void *arg)
{
zio_t *pio = arg;
zio_t *cio, *cio_next;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0;
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zio_link_t *zl = NULL;
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
"failure and has been suspended.\n", spa_name(spa));
(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
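/*
* Illustrative example (not a literal on-disk layout): a 128K write
* that can only be satisfied by gang allocation might end up as
*
*	gang leader bp
*	  gang header (one sector, SPA_GBH_NBLKPTRS == 3 bps)
*	    bp[0] -> 64K data block
*	    bp[1] -> 48K data block
*	    bp[2] -> gang header
*	               bp[0] -> 8K data block
*	               bp[1] -> 8K data block
*
* The leaves hold user data; the root and interior nodes are gang
* headers, as described above.
*/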
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_free(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_free(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
-/* ARGSUSED */
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
+ (void) gn, (void) data, (void) offset;
+
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
zio = zio_null(pio, pio->io_spa,
NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
}
return (zio);
}
-/* ARGSUSED */
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
+ (void) gn, (void) data, (void) offset;
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(zio->io_child_count == 0);
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_free(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (zio);
}
static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
zio_t *gio __maybe_unused = zio->io_gang_leader;
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
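/*
* Fold each gang member's allocated size into the corresponding DVA
* of the parent (gang header) bp, so the gang bp's asize reflects the
* header plus all of the constituents accounted so far.
*/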
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
/*
* The io_abd field will be NULL for a zio with no data. The io_flags
* will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
* check for it here as it is cleared in zio_ready.
*/
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
spa_t *spa = pio->io_spa;
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
int gbh_copies;
zio_prop_t zp;
int error;
boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
/*
* Encrypted blocks need DVA[2] left free, so encrypted gang headers
* can't have a third copy.
*/
gbh_copies = MIN(copies + 1, spa_max_replication(spa));
if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP)
gbh_copies = SPA_DVAS_PER_BP - 1;
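/*
* For example, with copies == 2 and spa_max_replication() == 3, the
* gang header gets gbh_copies == 3 (or 2 if the block is encrypted,
* since DVA[2] must remain free, per the comment above).
*/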
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
mca_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
return (pio);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
bzero(gbh, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
/*
* Create and nowait the gang children.
*/
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_complevel = gio->io_prop.zp_complevel;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
bzero(zp.zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
resid) : NULL, lsize, lsize, &zp,
zio_write_gang_member_ready, NULL, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
/*
* We didn't allocate this bp, so make sure it doesn't get unmarked.
*/
pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
zio_nowait(zio);
return (pio);
}
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
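/*
* As a rough administrative sketch (the exact policy is decided in
* dmu_write_policy()), nopwrite can only take effect on datasets with
* a nopwrite-capable checksum, compression enabled, and dedup off,
* e.g. something like:
*
*	zfs set checksum=sha256 compression=lz4 dedup=off pool/fs
*/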
static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (zio);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
sizeof (uint64_t)) == 0);
/*
* If we're overwriting a block that is currently on an
* indirect vdev, then ignore the nopwrite request and
* allow a new block to be allocated on a concrete vdev.
*/
spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
vdev_t *tvd = vdev_lookup_top(zio->io_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
if (tvd->vdev_ops == &vdev_indirect_ops) {
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
return (zio);
}
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (zio);
}
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddp = ddt_phys_select(dde, bp);
if (zio->io_error == 0)
ddt_phys_clear(ddp); /* this ddp doesn't need repair */
if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
dde->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (ddp_self == NULL)
return (zio);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
&blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (zio);
}
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (zio);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio so in these
* cases we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) {
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
} else if (lio != NULL) {
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
}
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio;
if (zio->io_error)
return;
ddt_enter(ddt);
ASSERT(dde->dde_lead_zio[p] == zio);
ddt_phys_fill(ddp, zio->io_bp);
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
ddt_exit(ddt);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
zio_link_t *zl = NULL;
while (zio_walk_parents(zio, &zl) != NULL)
ddt_phys_addref(ddp);
} else {
ddt_phys_clear(ddp);
}
ddt_exit(ddt);
}
static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
int p = zp->zp_copies;
zio_t *cio = NULL;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_TRUE);
ddp = &dde->dde_phys[p];
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
if (ddp->ddp_phys_birth != 0)
ddt_bp_fill(ddp, bp, txg);
if (dde->dde_lead_zio[p] != NULL)
zio_add_child(zio, dde->dde_lead_zio[p]);
else
ddt_phys_addref(ddp);
} else if (zio->io_bp_override) {
ASSERT(bp->blk_birth == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_fill(ddp, bp);
ddt_phys_addref(ddp);
} else {
cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, zp,
zio_ddt_child_write_ready, NULL, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[p] = cio;
}
ddt_exit(ddt);
zio_nowait(cio);
return (zio);
}
ddt_entry_t *freedde; /* for debugging */
static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
if (dde) {
ddp = ddt_phys_select(dde, bp);
if (ddp)
ddt_phys_decref(ddp);
}
ddt_exit(ddt);
return (zio);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
metaslab_class_t *mc;
/* locate an appropriate allocation class */
mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
zbookmark_phys_t *bm = &zio->io_bookmark;
/*
* We want to try to use as many allocators as possible to help improve
* performance, but we also want logically adjacent IOs to be physically
* adjacent to improve sequential read performance. We chunk each object
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
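/*
* Worked example (illustrative): a zio at blkid 3145728 of a given
* <objset, object, level> falls in region 3145728 >> 20 == 3, so all
* blocks in [3145728, 4194303] of that object and level hash to the
* same allocator, keeping those allocations physically close together.
*/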
zio->io_allocator = allocator;
zio->io_metaslab_class = mc;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
nio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
/*
* if not already chosen, locate an appropriate allocation class
*/
mc = zio->io_metaslab_class;
if (mc == NULL) {
mc = spa_preferred_class(spa, zio->io_size,
zio->io_prop.zp_type, zio->io_prop.zp_level,
zio->io_prop.zp_zpl_smallblk);
zio->io_metaslab_class = mc;
}
/*
* Try allocating the block in the usual metaslab class.
* If that's full, allocate it in the normal class.
* If that's full, allocate as a gang block,
* and if all are full, the allocation fails (which shouldn't happen).
*
* Note that we do not fall back on embedded slog (ZIL) space, to
* preserve unfragmented slog space, which is critical for decent
* sync write performance. If a log allocation fails, we will fall
* back to spa_sync() which is abysmal for performance.
*/
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
/*
* Fallback to normal class when an alloc class is full
*/
if (error == ENOSPC && mc != spa_normal_class(spa)) {
/*
* If throttling, transfer reservation over to normal class.
* The io_allocator slot can remain the same even though we
* are switching classes.
*/
if (mc->mc_alloc_throttle_enabled &&
(zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
metaslab_class_throttle_unreserve(mc,
zio->io_prop.zp_copies, zio->io_allocator, zio);
zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
VERIFY(metaslab_class_throttle_reserve(
spa_normal_class(spa),
zio->io_prop.zp_copies, zio->io_allocator, zio,
flags | METASLAB_MUST_RESERVE));
}
zio->io_metaslab_class = mc = spa_normal_class(spa);
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
}
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
return (zio_write_gang_block(zio, mc));
}
if (error != 0) {
if (error != ENOSPC ||
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
zio->io_error = error;
}
return (zio);
}
static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (zio);
}
static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (zio);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp))
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
/*
* Block pointer fields are useful to metaslabs for stats and debugging.
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_PSIZE(new_bp, size);
BP_SET_LEVEL(new_bp, 0);
/*
* When allocating a zil block, we don't have information about
* the final destination of the block except the objset it's part
* of, so we just hash the objset ID to pick the allocator to get
* some parallelism.
*/
int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
int allocator = (uint_t)cityhash4(0, 0, 0,
os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
if (error != 0) {
error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
if (error != 0) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* encrypted blocks will require an IV and salt. We generate
* these now since we will not be rewriting the bp at
* rewrite time.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), (u_longlong_t)size,
error);
}
return (error);
}
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different than the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(spa->spa_trust_config);
/*
* Note: the code can handle other kinds of writes,
* but we don't expect them.
*/
if (zio->io_vd->vdev_removing) {
ASSERT(zio->io_flags &
(ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
}
}
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
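/*
* For example, a 6K logical write on a top-level vdev with
* ashift == 12 (4K alignment) is padded to an 8K buffer here, with
* the trailing 2K zeroed before being pushed down to the vdev.
*/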
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering.
*
* There are a few ways that we can end up creating these spurious
* resilver i/os:
*
* 1. A resilver i/o will be issued if any DVA in the BP has a
* dirty DTL. The mirror code will issue resilver writes to
* each DVA, including the one(s) that are not on vdevs with dirty
* DTLs.
*
* 2. With nested replication, which happens when we have a
* "replacing" or "spare" vdev that's a child of a mirror or raidz.
* For example, given mirror(replacing(A+B), C), it's likely that
* only A is out of date (it's the new device). In this case, we'll
* read from C, then use the data to resilver A+B -- but we don't
* actually want to resilver B, just A. The top-level mirror has no
* way to know this, so instead we just discard unnecessary repairs
* as we work our way down the vdev tree.
*
* 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
* The same logic applies to any form of nested replication: ditto
* + mirror, RAID-Z + replacing, etc.
*
* However, indirect vdevs point off to other vdevs which may have
* DTL's, so we never bypass them. The child i/os on concrete vdevs
* will be properly bypassed instead.
*
* Leaf DTL_PARTIAL can be empty when a legitimate write comes from
* a dRAID spare vdev. For example, when a dRAID spare is first
* used, its spare blocks need to be written to, but the leaf vdevs
* of such blocks can have an empty DTL_PARTIAL.
*
* There seems to be no clean way to allow such writes while still
* bypassing spurious ones, so for now we simply avoid all bypassing
* for dRAID for correctness.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
vd->vdev_ops != &vdev_indirect_ops &&
vd->vdev_top->vdev_ops != &vdev_draid_ops &&
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (zio);
}
/*
* Select the next best leaf I/O to process. Distributed spares are
* excluded since they dispatch the I/O directly to a leaf vdev after
* applying the dRAID mapping.
*/
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops &&
(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM)) {
if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
return (zio);
if ((zio = vdev_queue_io(zio)) == NULL)
return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (NULL);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
return (NULL);
}
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
vdev_queue_io_done(zio);
if (zio->io_type == ZIO_TYPE_WRITE)
vdev_cache_write(zio);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error)
VERIFY(vdev_probe(vd, zio) == NULL);
return (zio);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
-/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
zio->io_flags |= ZIO_FLAG_IO_RETRY |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (NULL);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
"cant_write=TRUE due to write failure with ENXIO",
zio);
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
* attempts will ever succeed. In this case we set a persistent
* boolean flag so that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&
zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
vd->vdev_nowritecache = B_TRUE;
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
zio->io_physdone != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
zio->io_physdone(zio->io_logical);
}
return (zio);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Objset blocks are a special case since they have 2 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (zio);
}
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (zio);
}
static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
}
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
(void) zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, &info);
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
return (zio);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
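/*
 * Editorial example (hedged, not part of the upstream source): with the
 * ranking above,
 *
 * zio_worst_error(ECKSUM, ENXIO) returns ECKSUM,
 * zio_worst_error(EIO, 0) returns EIO, and
 * zio_worst_error(ENOSPC, EIO) returns ENOSPC, because an errno that is
 * not listed in zio_error_rank[] falls off the end of the table and
 * therefore outranks every ranked error.
 */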
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
ZIO_WAIT_READY)) {
return (NULL);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_metaslab_class != NULL);
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
zio->io_metaslab_class, zio->io_prop.zp_copies,
zio->io_allocator, zio);
zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (zio);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
zio_t *lio __maybe_unused = zio->io_logical;
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
static zio_t *
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (NULL);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (adata != NULL && asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (adata != NULL && asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, taking more
* than 30 seconds to complete, post an error describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
/*
* We want to only increment our slow IO counters if
* the IO is valid (i.e. not if the drive is removed).
*
* zfs_ereport_post() will also do these checks, but
* it can also ratelimit and have other failures, so we
* need to increment the slow_io counters independent
* of it.
*/
if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, zio)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_slow_ios++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, &zio->io_bookmark,
zio, 0);
}
}
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd)) {
int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
if (ret != EALREADY) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_READ)
zio->io_vd->vdev_stat.vs_read_errors++;
else if (zio->io_type == ZIO_TYPE_WRITE)
zio->io_vd->vdev_stat.vs_write_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
/*
* This is a rare code path, so we don't
* bother with "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
NULL);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
/*
* This is a rare code path, so we don't bother with
* "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
zio_reexecute, zio, 0, &zio->io_tqent);
}
return (NULL);
}
ASSERT(zio->io_child_count == 0);
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
!BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
!(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
}
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* We are done executing this zio. We may want to execute a parent
* next. See the comment in zio_notify_parent().
*/
zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (next_to_execute);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (any value larger than a level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level but different blkids, which is possible only when the block
* sizes are not the same. There is presently no way to change the
* indirect block sizes.
*/
return (0);
}
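/*
 * Editorial worked example (hedged; the numbers are illustrative and assume
 * 128K indirect blocks, i.e. ibs = 17 and 1024 block pointers per indirect
 * block): a level-1 bookmark <obj 5, level 1, blkid 3> has span
 * BP_SPANB(17, 1) = 1024, so its L0 equivalent is 3 * 1024 = 3072. A level-0
 * bookmark <obj 5, level 0, blkid 3000> keeps L0equiv = 3000 and therefore
 * compares before it, while <obj 5, level 0, blkid 3072> ties on L0equiv and
 * is ordered after it by the level rule, matching pre-order traversal where
 * an indirect block is visited before the leaves it points to.
 */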
/*
* This function answers the following question: given that last_block is the
* place where our traversal stopped last time, does that guarantee that we've
* visited every node under subtree_root? Comparing subtree_root itself to
* last_block only tells us whether the root block has been reached, not
* whether all of its children have, so we can't just use the raw output of
* zbookmark_compare. Instead, we pass in a modified version of subtree_root:
* by incrementing the block id and then checking whether that modified
* bookmark sorts at or before last_block, we can tell whether having visited
* last_block implies that all of subtree_root's children have been visited.
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT(last_block->zb_level == 0);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you change how the zbookmark_compare code works, make sure that this
* code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
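/*
 * Editorial example (hedged, illustrative values only, again assuming
 * 1024-pointer indirect blocks): for a level-1 subtree_root
 * <obj, level 1, blkid 3>, mod_zb becomes <obj, level 1, blkid 4> with an
 * L0 equivalent of 4096. Any level-0 last_block at L0 blkid 4096 or beyond
 * (or in a later object) makes zbookmark_compare() return <= 0, proving that
 * every leaf under blkid 3 (L0 blocks 3072-4095) was already visited.
 */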
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
"Max I/O completion time (milliseconds) before marking it as slow");
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
"Prioritize requeued I/O");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, INT, ZMOD_RW,
"Defer frees starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, INT, ZMOD_RW,
"Don't compress starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, INT, ZMOD_RW,
"Rewrite new bps starting in this pass");
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
"Throttle block allocations in the ZIO pipeline");
ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
"Log all slow ZIOs, not just those with vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zio_checksum.c b/sys/contrib/openzfs/module/zfs/zio_checksum.c
index f8fee78c6068..00837f07e8e7 100644
--- a/sys/contrib/openzfs/module/zfs/zio_checksum.c
+++ b/sys/contrib/openzfs/module/zfs/zio_checksum.c
@@ -1,570 +1,570 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 by Delphix. All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zil.h>
#include <sys/abd.h>
#include <zfs_fletcher.h>
/*
* Checksum vectors.
*
* In the SPA, everything is checksummed. We support checksum vectors
* for three distinct reasons:
*
* 1. Different kinds of data need different levels of protection.
* For SPA metadata, we always want a very strong checksum.
* For user data, we let users make the trade-off between speed
* and checksum strength.
*
* 2. Cryptographic hash and MAC algorithms are an area of active research.
* It is likely that future hash functions will be at least as strong
* as current best-of-breed, and may be substantially faster as well.
* We want the ability to take advantage of these new hashes as soon as
* they become available.
*
* 3. If someone develops hardware that can compute a strong hash quickly,
* we want the ability to take advantage of that hardware.
*
* Of course, we don't want a checksum upgrade to invalidate existing
* data, so we store the checksum *function* in eight bits of the bp.
* This gives us room for up to 256 different checksum functions.
*
* When writing a block, we always checksum it with the latest-and-greatest
* checksum function of the appropriate strength. When reading a block,
* we compare the expected checksum against the actual checksum, which we
* compute via the checksum function specified by BP_GET_CHECKSUM(bp).
*
* SALTED CHECKSUMS
*
* To enable the use of less secure hash algorithms with dedup, we
* introduce the notion of salted checksums (MACs, really). A salted
* checksum is fed both a random 256-bit value (the salt) and the data
* to be checksummed. This salt is kept secret (stored on the pool, but
* never shown to the user). Thus even if an attacker knew of collision
* weaknesses in the hash algorithm, they won't be able to mount a known
* plaintext attack on the DDT, since the actual hash value cannot be
* known ahead of time. How the salt is used is algorithm-specific
* (some might simply prefix it to the data block, others might need to
* utilize a full-blown HMAC). On disk the salt is stored in a ZAP
* object in the MOS (DMU_POOL_CHECKSUM_SALT).
*
* CONTEXT TEMPLATES
*
* Some hashing algorithms need to perform a substantial amount of
* initialization work (e.g. salted checksums above may need to pre-hash
* the salt) before being able to process data. Performing this
* redundant work for each block would be wasteful, so we instead allow
* a checksum algorithm to do the work once (the first time it's used)
* and then keep this pre-initialized context as a template inside the
* spa_t (spa_cksum_tmpls). If the zio_checksum_info_t contains
* non-NULL ci_tmpl_init and ci_tmpl_free callbacks, they are used to
* construct and destruct the pre-initialized checksum context. The
* pre-initialized context is then reused during each checksum
* invocation and passed to the checksum function.
*/
-/*ARGSUSED*/
static void
abd_checksum_off(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) abd, (void) size, (void) ctx_template;
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
-/*ARGSUSED*/
static void
abd_fletcher_2_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_native, zcp);
}
-/*ARGSUSED*/
static void
abd_fletcher_2_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_init(zcp);
(void) abd_iterate_func(abd, 0, size,
fletcher_2_incremental_byteswap, zcp);
}
static inline void
abd_fletcher_4_impl(abd_t *abd, uint64_t size, zio_abd_checksum_data_t *acdp)
{
fletcher_4_abd_ops.acf_init(acdp);
abd_iterate_func(abd, 0, size, fletcher_4_abd_ops.acf_iter, acdp);
fletcher_4_abd_ops.acf_fini(acdp);
}
-/*ARGSUSED*/
void
abd_fletcher_4_native(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
.acd_byteorder = ZIO_CHECKSUM_NATIVE,
.acd_zcp = zcp,
.acd_ctx = &ctx
};
abd_fletcher_4_impl(abd, size, &acd);
}
-/*ARGSUSED*/
void
abd_fletcher_4_byteswap(abd_t *abd, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
+ (void) ctx_template;
fletcher_4_ctx_t ctx;
zio_abd_checksum_data_t acd = {
.acd_byteorder = ZIO_CHECKSUM_BYTESWAP,
.acd_zcp = zcp,
.acd_ctx = &ctx
};
abd_fletcher_4_impl(abd, size, &acd);
}
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
{{NULL, NULL}, NULL, NULL, 0, "inherit"},
{{NULL, NULL}, NULL, NULL, 0, "on"},
{{abd_checksum_off, abd_checksum_off},
NULL, NULL, 0, "off"},
{{abd_checksum_SHA256, abd_checksum_SHA256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
"label"},
{{abd_checksum_SHA256, abd_checksum_SHA256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_EMBEDDED,
"gang_header"},
{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog"},
{{abd_fletcher_2_native, abd_fletcher_2_byteswap},
NULL, NULL, 0, "fletcher2"},
{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_METADATA, "fletcher4"},
{{abd_checksum_SHA256, abd_checksum_SHA256},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_NOPWRITE, "sha256"},
{{abd_fletcher_4_native, abd_fletcher_4_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_EMBEDDED, "zilog2"},
{{abd_checksum_off, abd_checksum_off},
NULL, NULL, 0, "noparity"},
{{abd_checksum_SHA512_native, abd_checksum_SHA512_byteswap},
NULL, NULL, ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_NOPWRITE, "sha512"},
{{abd_checksum_skein_native, abd_checksum_skein_byteswap},
abd_checksum_skein_tmpl_init, abd_checksum_skein_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_DEDUP |
ZCHECKSUM_FLAG_SALTED | ZCHECKSUM_FLAG_NOPWRITE, "skein"},
#if !defined(__FreeBSD__)
{{abd_checksum_edonr_native, abd_checksum_edonr_byteswap},
abd_checksum_edonr_tmpl_init, abd_checksum_edonr_tmpl_free,
ZCHECKSUM_FLAG_METADATA | ZCHECKSUM_FLAG_SALTED |
ZCHECKSUM_FLAG_NOPWRITE, "edonr"},
#endif
};
/*
* The flag corresponding to the "verify" in dedup=[checksum,]verify
* must be cleared first, so callers should use ZIO_CHECKSUM_MASK.
*/
spa_feature_t
zio_checksum_to_feature(enum zio_checksum cksum)
{
VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);
switch (cksum) {
case ZIO_CHECKSUM_SHA512:
return (SPA_FEATURE_SHA512);
case ZIO_CHECKSUM_SKEIN:
return (SPA_FEATURE_SKEIN);
#if !defined(__FreeBSD__)
case ZIO_CHECKSUM_EDONR:
return (SPA_FEATURE_EDONR);
#endif
default:
return (SPA_FEATURE_NONE);
}
}
enum zio_checksum
zio_checksum_select(enum zio_checksum child, enum zio_checksum parent)
{
ASSERT(child < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);
if (child == ZIO_CHECKSUM_INHERIT)
return (parent);
if (child == ZIO_CHECKSUM_ON)
return (ZIO_CHECKSUM_ON_VALUE);
return (child);
}
enum zio_checksum
zio_checksum_dedup_select(spa_t *spa, enum zio_checksum child,
enum zio_checksum parent)
{
ASSERT((child & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
ASSERT((parent & ZIO_CHECKSUM_MASK) < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(parent != ZIO_CHECKSUM_INHERIT && parent != ZIO_CHECKSUM_ON);
if (child == ZIO_CHECKSUM_INHERIT)
return (parent);
if (child == ZIO_CHECKSUM_ON)
return (spa_dedup_checksum(spa));
if (child == (ZIO_CHECKSUM_ON | ZIO_CHECKSUM_VERIFY))
return (spa_dedup_checksum(spa) | ZIO_CHECKSUM_VERIFY);
ASSERT((zio_checksum_table[child & ZIO_CHECKSUM_MASK].ci_flags &
ZCHECKSUM_FLAG_DEDUP) ||
(child & ZIO_CHECKSUM_VERIFY) || child == ZIO_CHECKSUM_OFF);
return (child);
}
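/*
 * Editorial note (hedged): with the two selection routines above, a dataset
 * whose checksum property is "inherit" takes the parent's resolved value and
 * "on" maps to ZIO_CHECKSUM_ON_VALUE (fletcher4 in current OpenZFS); for
 * dedup, "on" instead resolves to the pool's dedup checksum via
 * spa_dedup_checksum(), with the verify bit carried through when requested.
 */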
/*
* Set the external verifier for a gang block based on <vdev, offset, txg>,
* a tuple which is guaranteed to be unique for the life of the pool.
*/
static void
zio_checksum_gang_verifier(zio_cksum_t *zcp, const blkptr_t *bp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t txg = BP_PHYSICAL_BIRTH(bp);
ASSERT(BP_IS_GANG(bp));
ZIO_SET_CHECKSUM(zcp, DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), txg, 0);
}
/*
* Set the external verifier for a label block based on its offset.
* The vdev is implicit, and the txg is unknowable at pool open time --
* hence the logic in vdev_uberblock_load() to find the most recent copy.
*/
static void
zio_checksum_label_verifier(zio_cksum_t *zcp, uint64_t offset)
{
ZIO_SET_CHECKSUM(zcp, offset, 0, 0, 0);
}
/*
* Calls the template init function of a checksum which supports context
* templates and installs the template into the spa_t.
*/
static void
zio_checksum_template_init(enum zio_checksum checksum, spa_t *spa)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
if (ci->ci_tmpl_init == NULL)
return;
if (spa->spa_cksum_tmpls[checksum] != NULL)
return;
VERIFY(ci->ci_tmpl_free != NULL);
mutex_enter(&spa->spa_cksum_tmpls_lock);
if (spa->spa_cksum_tmpls[checksum] == NULL) {
spa->spa_cksum_tmpls[checksum] =
ci->ci_tmpl_init(&spa->spa_cksum_salt);
VERIFY(spa->spa_cksum_tmpls[checksum] != NULL);
}
mutex_exit(&spa->spa_cksum_tmpls_lock);
}
/* convenience function to update a checksum to accommodate an encryption MAC */
static void
zio_checksum_handle_crypt(zio_cksum_t *cksum, zio_cksum_t *saved, boolean_t xor)
{
/*
* Weak checksums do not have their entropy spread evenly
* across the bits of the checksum. Therefore, when truncating
* a weak checksum we XOR the first 2 words with the last 2 so
* that we don't "lose" any entropy unnecessarily.
*/
if (xor) {
cksum->zc_word[0] ^= cksum->zc_word[2];
cksum->zc_word[1] ^= cksum->zc_word[3];
}
cksum->zc_word[2] = saved->zc_word[2];
cksum->zc_word[3] = saved->zc_word[3];
}
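/*
 * Editorial sketch (hedged, illustrative values): given a computed checksum
 * {c0, c1, c2, c3} and a saved value whose last two words {s2, s3} hold the
 * MAC, the xor path yields {c0 ^ c2, c1 ^ c3, s2, s3}; for the strong
 * (dedup-capable) hashes, xor is not applied and only {c2, c3} are replaced
 * by {s2, s3}.
 */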
/*
* Generate the checksum.
*/
void
zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
abd_t *abd, uint64_t size)
{
static const uint64_t zec_magic = ZEC_MAGIC;
blkptr_t *bp = zio->io_bp;
uint64_t offset = zio->io_offset;
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_cksum_t cksum, saved;
spa_t *spa = zio->io_spa;
boolean_t insecure = (ci->ci_flags & ZCHECKSUM_FLAG_DEDUP) == 0;
ASSERT((uint_t)checksum < ZIO_CHECKSUM_FUNCTIONS);
ASSERT(ci->ci_func[0] != NULL);
zio_checksum_template_init(checksum, spa);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
zio_eck_t eck;
size_t eck_offset;
bzero(&saved, sizeof (zio_cksum_t));
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;
abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
size = P2ROUNDUP_TYPED(zilc.zc_nused, ZIL_MIN_BLKSZ,
uint64_t);
eck = zilc.zc_eck;
eck_offset = offsetof(zil_chain_t, zc_eck);
} else {
eck_offset = size - sizeof (zio_eck_t);
abd_copy_to_buf_off(&eck, abd, eck_offset,
sizeof (zio_eck_t));
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER) {
zio_checksum_gang_verifier(&eck.zec_cksum, bp);
} else if (checksum == ZIO_CHECKSUM_LABEL) {
zio_checksum_label_verifier(&eck.zec_cksum, offset);
} else {
saved = eck.zec_cksum;
eck.zec_cksum = bp->blk_cksum;
}
abd_copy_from_buf_off(abd, &zec_magic,
eck_offset + offsetof(zio_eck_t, zec_magic),
sizeof (zec_magic));
abd_copy_from_buf_off(abd, &eck.zec_cksum,
eck_offset + offsetof(zio_eck_t, zec_cksum),
sizeof (zio_cksum_t));
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
if (bp != NULL && BP_USES_CRYPT(bp) &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET)
zio_checksum_handle_crypt(&cksum, &saved, insecure);
abd_copy_from_buf_off(abd, &cksum,
eck_offset + offsetof(zio_eck_t, zec_cksum),
sizeof (zio_cksum_t));
} else {
saved = bp->blk_cksum;
ci->ci_func[0](abd, size, spa->spa_cksum_tmpls[checksum],
&cksum);
if (BP_USES_CRYPT(bp) && BP_GET_TYPE(bp) != DMU_OT_OBJSET)
zio_checksum_handle_crypt(&cksum, &saved, insecure);
bp->blk_cksum = cksum;
}
}
int
zio_checksum_error_impl(spa_t *spa, const blkptr_t *bp,
enum zio_checksum checksum, abd_t *abd, uint64_t size, uint64_t offset,
zio_bad_cksum_t *info)
{
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
zio_cksum_t actual_cksum, expected_cksum;
zio_eck_t eck;
int byteswap;
if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
return (SET_ERROR(EINVAL));
zio_checksum_template_init(checksum, spa);
if (ci->ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
zio_cksum_t verifier;
size_t eck_offset;
if (checksum == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t zilc;
uint64_t nused;
abd_copy_to_buf(&zilc, abd, sizeof (zil_chain_t));
eck = zilc.zc_eck;
eck_offset = offsetof(zil_chain_t, zc_eck) +
offsetof(zio_eck_t, zec_cksum);
if (eck.zec_magic == ZEC_MAGIC) {
nused = zilc.zc_nused;
} else if (eck.zec_magic == BSWAP_64(ZEC_MAGIC)) {
nused = BSWAP_64(zilc.zc_nused);
} else {
return (SET_ERROR(ECKSUM));
}
if (nused > size) {
return (SET_ERROR(ECKSUM));
}
size = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
} else {
eck_offset = size - sizeof (zio_eck_t);
abd_copy_to_buf_off(&eck, abd, eck_offset,
sizeof (zio_eck_t));
eck_offset += offsetof(zio_eck_t, zec_cksum);
}
if (checksum == ZIO_CHECKSUM_GANG_HEADER)
zio_checksum_gang_verifier(&verifier, bp);
else if (checksum == ZIO_CHECKSUM_LABEL)
zio_checksum_label_verifier(&verifier, offset);
else
verifier = bp->blk_cksum;
byteswap = (eck.zec_magic == BSWAP_64(ZEC_MAGIC));
if (byteswap)
byteswap_uint64_array(&verifier, sizeof (zio_cksum_t));
expected_cksum = eck.zec_cksum;
abd_copy_from_buf_off(abd, &verifier, eck_offset,
sizeof (zio_cksum_t));
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
abd_copy_from_buf_off(abd, &expected_cksum, eck_offset,
sizeof (zio_cksum_t));
if (byteswap) {
byteswap_uint64_array(&expected_cksum,
sizeof (zio_cksum_t));
}
} else {
byteswap = BP_SHOULD_BYTESWAP(bp);
expected_cksum = bp->blk_cksum;
ci->ci_func[byteswap](abd, size,
spa->spa_cksum_tmpls[checksum], &actual_cksum);
}
/*
* MAC checksums are a special case since half of this checksum will
* actually be the encryption MAC. This will be verified by the
* decryption process, so we just check the truncated checksum now.
* Objset blocks use embedded MACs so we don't truncate the checksum
* for them.
*/
if (bp != NULL && BP_USES_CRYPT(bp) &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET) {
if (!(ci->ci_flags & ZCHECKSUM_FLAG_DEDUP)) {
actual_cksum.zc_word[0] ^= actual_cksum.zc_word[2];
actual_cksum.zc_word[1] ^= actual_cksum.zc_word[3];
}
actual_cksum.zc_word[2] = 0;
actual_cksum.zc_word[3] = 0;
expected_cksum.zc_word[2] = 0;
expected_cksum.zc_word[3] = 0;
}
if (info != NULL) {
info->zbc_expected = expected_cksum;
info->zbc_actual = actual_cksum;
info->zbc_checksum_name = ci->ci_name;
info->zbc_byteswapped = byteswap;
info->zbc_injected = 0;
info->zbc_has_cksum = 1;
}
if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (SET_ERROR(ECKSUM));
return (0);
}
int
zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
{
blkptr_t *bp = zio->io_bp;
uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
int error;
uint64_t size = (bp == NULL ? zio->io_size :
(BP_IS_GANG(bp) ? SPA_GANGBLOCKSIZE : BP_GET_PSIZE(bp)));
uint64_t offset = zio->io_offset;
abd_t *data = zio->io_abd;
spa_t *spa = zio->io_spa;
error = zio_checksum_error_impl(spa, bp, checksum, data, size,
offset, info);
if (zio_injection_enabled && error == 0 && zio->io_error == 0) {
error = zio_handle_fault_injection(zio, ECKSUM);
if (error != 0)
info->zbc_injected = 1;
}
return (error);
}
/*
* Called by a spa_t that's about to be deallocated. This steps through
* all of the checksum context templates and deallocates any that were
* initialized using the algorithm-specific template init function.
*/
void
zio_checksum_templates_free(spa_t *spa)
{
for (enum zio_checksum checksum = 0;
checksum < ZIO_CHECKSUM_FUNCTIONS; checksum++) {
if (spa->spa_cksum_tmpls[checksum] != NULL) {
zio_checksum_info_t *ci = &zio_checksum_table[checksum];
VERIFY(ci->ci_tmpl_free != NULL);
ci->ci_tmpl_free(spa->spa_cksum_tmpls[checksum]);
spa->spa_cksum_tmpls[checksum] = NULL;
}
}
}
diff --git a/sys/contrib/openzfs/module/zfs/zio_compress.c b/sys/contrib/openzfs/module/zfs/zio_compress.c
index 1ff1e76d7f22..cded11f4cbd5 100644
--- a/sys/contrib/openzfs/module/zfs/zio_compress.c
+++ b/sys/contrib/openzfs/module/zfs/zio_compress.c
@@ -1,220 +1,222 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
/*
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/zfeature.h>
#include <sys/zio.h>
#include <sys/zio_compress.h>
#include <sys/zstd/zstd.h>
/*
* If nonzero, roughly one in every X decompression attempts (where X is the
* value of this tunable) will fail, simulating an undetected memory error.
*/
unsigned long zio_decompress_fail_fraction = 0;
/*
* Compression vectors.
*/
zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
{"inherit", 0, NULL, NULL, NULL},
{"on", 0, NULL, NULL, NULL},
{"uncompressed", 0, NULL, NULL, NULL},
{"lzjb", 0, lzjb_compress, lzjb_decompress, NULL},
{"empty", 0, NULL, NULL, NULL},
{"gzip-1", 1, gzip_compress, gzip_decompress, NULL},
{"gzip-2", 2, gzip_compress, gzip_decompress, NULL},
{"gzip-3", 3, gzip_compress, gzip_decompress, NULL},
{"gzip-4", 4, gzip_compress, gzip_decompress, NULL},
{"gzip-5", 5, gzip_compress, gzip_decompress, NULL},
{"gzip-6", 6, gzip_compress, gzip_decompress, NULL},
{"gzip-7", 7, gzip_compress, gzip_decompress, NULL},
{"gzip-8", 8, gzip_compress, gzip_decompress, NULL},
{"gzip-9", 9, gzip_compress, gzip_decompress, NULL},
{"zle", 64, zle_compress, zle_decompress, NULL},
{"lz4", 0, lz4_compress_zfs, lz4_decompress_zfs, NULL},
{"zstd", ZIO_ZSTD_LEVEL_DEFAULT, zfs_zstd_compress,
zfs_zstd_decompress, zfs_zstd_decompress_level},
};
uint8_t
zio_complevel_select(spa_t *spa, enum zio_compress compress, uint8_t child,
uint8_t parent)
{
+ (void) spa;
uint8_t result;
if (!ZIO_COMPRESS_HASLEVEL(compress))
return (0);
result = child;
if (result == ZIO_COMPLEVEL_INHERIT)
result = parent;
return (result);
}
enum zio_compress
zio_compress_select(spa_t *spa, enum zio_compress child,
enum zio_compress parent)
{
enum zio_compress result;
ASSERT(child < ZIO_COMPRESS_FUNCTIONS);
ASSERT(parent < ZIO_COMPRESS_FUNCTIONS);
ASSERT(parent != ZIO_COMPRESS_INHERIT);
result = child;
if (result == ZIO_COMPRESS_INHERIT)
result = parent;
if (result == ZIO_COMPRESS_ON) {
if (spa_feature_is_active(spa, SPA_FEATURE_LZ4_COMPRESS))
result = ZIO_COMPRESS_LZ4_ON_VALUE;
else
result = ZIO_COMPRESS_LEGACY_ON_VALUE;
}
return (result);
}
-/*ARGSUSED*/
static int
zio_compress_zeroed_cb(void *data, size_t len, void *private)
{
+ (void) private;
+
uint64_t *end = (uint64_t *)((char *)data + len);
for (uint64_t *word = (uint64_t *)data; word < end; word++)
if (*word != 0)
return (1);
return (0);
}
size_t
zio_compress_data(enum zio_compress c, abd_t *src, void *dst, size_t s_len,
uint8_t level)
{
size_t c_len, d_len;
uint8_t complevel;
zio_compress_info_t *ci = &zio_compress_table[c];
ASSERT((uint_t)c < ZIO_COMPRESS_FUNCTIONS);
ASSERT((uint_t)c == ZIO_COMPRESS_EMPTY || ci->ci_compress != NULL);
/*
* If the data is all zeroes, we don't even need to allocate
* a block for it. We indicate this by returning zero size.
*/
if (abd_iterate_func(src, 0, s_len, zio_compress_zeroed_cb, NULL) == 0)
return (0);
if (c == ZIO_COMPRESS_EMPTY)
return (s_len);
/* Compress at least 12.5% */
d_len = s_len - (s_len >> 3);
complevel = ci->ci_level;
if (c == ZIO_COMPRESS_ZSTD) {
/* If we don't know the level, we can't compress it */
if (level == ZIO_COMPLEVEL_INHERIT)
return (s_len);
if (level == ZIO_COMPLEVEL_DEFAULT)
complevel = ZIO_ZSTD_LEVEL_DEFAULT;
else
complevel = level;
ASSERT3U(complevel, !=, ZIO_COMPLEVEL_INHERIT);
}
/* No compression algorithms can read from ABDs directly */
void *tmp = abd_borrow_buf_copy(src, s_len);
c_len = ci->ci_compress(tmp, dst, s_len, d_len, complevel);
abd_return_buf(src, tmp, s_len);
if (c_len > d_len)
return (s_len);
ASSERT3U(c_len, <=, d_len);
return (c_len);
}
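/*
 * Editorial example (hedged, illustrative numbers): for a 128K source buffer,
 * d_len = 131072 - (131072 >> 3) = 114688, so the compressor must save at
 * least 16K (12.5%) for the compressed copy to be kept; otherwise s_len is
 * returned and the block is written uncompressed.
 */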
int
zio_decompress_data_buf(enum zio_compress c, void *src, void *dst,
size_t s_len, size_t d_len, uint8_t *level)
{
zio_compress_info_t *ci = &zio_compress_table[c];
if ((uint_t)c >= ZIO_COMPRESS_FUNCTIONS || ci->ci_decompress == NULL)
return (SET_ERROR(EINVAL));
if (ci->ci_decompress_level != NULL && level != NULL)
return (ci->ci_decompress_level(src, dst, s_len, d_len, level));
return (ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level));
}
int
zio_decompress_data(enum zio_compress c, abd_t *src, void *dst,
size_t s_len, size_t d_len, uint8_t *level)
{
void *tmp = abd_borrow_buf_copy(src, s_len);
int ret = zio_decompress_data_buf(c, tmp, dst, s_len, d_len, level);
abd_return_buf(src, tmp, s_len);
/*
* Decompression shouldn't fail, because we've already verified
* the checksum. However, for extra protection (e.g. against bitflips
* in non-ECC RAM), we handle this error (and test it).
*/
if (zio_decompress_fail_fraction != 0 &&
random_in_range(zio_decompress_fail_fraction) == 0)
ret = SET_ERROR(EINVAL);
return (ret);
}
int
zio_compress_to_feature(enum zio_compress comp)
{
switch (comp) {
case ZIO_COMPRESS_ZSTD:
return (SPA_FEATURE_ZSTD_COMPRESS);
default:
break;
}
return (SPA_FEATURE_NONE);
}
diff --git a/sys/contrib/openzfs/rpm/generic/zfs-dkms.spec.in b/sys/contrib/openzfs/rpm/generic/zfs-dkms.spec.in
index aab1d9399077..02be716aa964 100644
--- a/sys/contrib/openzfs/rpm/generic/zfs-dkms.spec.in
+++ b/sys/contrib/openzfs/rpm/generic/zfs-dkms.spec.in
@@ -1,113 +1,113 @@
%{?!packager: %define packager Brian Behlendorf <behlendorf1@llnl.gov>}
%if ! 0%{?rhel}%{?fedora}%{?mageia}%{?suse_version}
%define not_rpm 1
%endif
# Exclude input files from mangling
%global __brp_mangle_shebangs_exclude_from ^/usr/src/.*$
%define module @PACKAGE@
%define mkconf scripts/dkms.mkconf
Name: %{module}-dkms
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Kernel module(s) (dkms)
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
Source0: %{module}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildArch: noarch
Requires: dkms >= 2.2.0.3
Requires(post): dkms >= 2.2.0.3
Requires(preun): dkms >= 2.2.0.3
Requires: gcc, make, perl, diffutils
Requires(post): gcc, make, perl, diffutils
%if 0%{?rhel}%{?fedora}%{?mageia}%{?suse_version}
Requires: kernel-devel >= @ZFS_META_KVER_MIN@, kernel-devel <= @ZFS_META_KVER_MAX@.999
Requires(post): kernel-devel >= @ZFS_META_KVER_MIN@, kernel-devel <= @ZFS_META_KVER_MAX@.999
Obsoletes: spl-dkms
%endif
Provides: %{module}-kmod = %{version}
AutoReqProv: no
%if 0%{?rhel}%{?fedora}%{?suse_version}
# We don't directly use it, but if this isn't installed, rpmbuild as root can
# crash+corrupt rpmdb
# See issue #12071
BuildRequires: ncompress
%endif
%description
This package contains the dkms ZFS kernel modules.
%prep
%setup -q -n %{module}-%{version}
%build
%{mkconf} -n %{module} -v %{version} -f dkms.conf
%install
if [ "$RPM_BUILD_ROOT" != "/" ]; then
rm -rf $RPM_BUILD_ROOT
fi
mkdir -p $RPM_BUILD_ROOT/usr/src/
cp -rf ${RPM_BUILD_DIR}/%{module}-%{version} $RPM_BUILD_ROOT/usr/src/
%clean
if [ "$RPM_BUILD_ROOT" != "/" ]; then
rm -rf $RPM_BUILD_ROOT
fi
%files
%defattr(-,root,root)
/usr/src/%{module}-%{version}
%post
for POSTINST in /usr/lib/dkms/common.postinst; do
if [ -f $POSTINST ]; then
$POSTINST %{module} %{version}
exit $?
fi
echo "WARNING: $POSTINST does not exist."
done
echo -e "ERROR: DKMS version is too old and %{module} was not"
echo -e "built with legacy DKMS support."
echo -e "You must either rebuild %{module} with legacy postinst"
echo -e "support or upgrade DKMS to a more current version."
exit 1
%preun
# Are we doing an upgrade?
if [ "$1" = "1" -o "$1" = "upgrade" ] ; then
# Yes we are. Are we upgrading to a new ZFS version?
- NEWEST_VER=$(dkms status zfs | sed 's/,//g' | sort -r -V | awk '/installed/{print $2; exit}')
+ NEWEST_VER=$(dkms status zfs | tr -d , | sort -r -V | awk '/installed/{print $2; exit}')
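# Editorial note (hypothetical output, not from any particular system): if
# `dkms status zfs` printed
#   zfs, 2.1.2, 5.15.0-25-generic, x86_64: installed
#   zfs, 2.1.4, 5.15.0-27-generic, x86_64: installed
# the pipeline above would strip the commas, version-sort newest first, and
# set NEWEST_VER=2.1.4.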
if [ "$NEWEST_VER" != "%{version}" ] ; then
# Yes, it's a new ZFS version. We'll uninstall the old module
# later on in this script.
true
else
# No, it's probably an upgrade of the same ZFS version
# to a new distro (zfs-dkms-0.7.12.fc28->zfs-dkms-0.7.12.fc29).
# Don't remove our modules, since the rebuild for the new
# distro will automatically delete the old modules.
exit 0
fi
fi
# If we're here then we're doing an uninstall (not upgrade).
CONFIG_H="/var/lib/dkms/%{module}/%{version}/*/*/%{module}_config.h"
SPEC_META_ALIAS="@PACKAGE@-@VERSION@-@RELEASE@"
DKMS_META_ALIAS=`cat $CONFIG_H 2>/dev/null |
awk -F'"' '/META_ALIAS\s+"/ { print $2; exit 0 }'`
if [ "$SPEC_META_ALIAS" = "$DKMS_META_ALIAS" ]; then
echo -e
echo -e "Uninstall of %{module} module ($SPEC_META_ALIAS) beginning:"
dkms remove -m %{module} -v %{version} --all %{!?not_rpm:--rpm_safe_upgrade}
fi
exit 0
diff --git a/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in b/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in
index 1692be1a72e6..53b1e1385159 100644
--- a/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in
+++ b/sys/contrib/openzfs/rpm/generic/zfs-kmod.spec.in
@@ -1,167 +1,170 @@
%define module @PACKAGE@
%if !%{defined ksrc}
%if 0%{?rhel}%{?fedora}
%define ksrc ${kernel_version##*___}
%else
%define ksrc "$( \
if [ -e "/usr/src/linux-${kernel_version%%___*}" ]; then \
echo "/usr/src/linux-${kernel_version%%___*}"; \
elif [ -e "/lib/modules/${kernel_version%%___*}/source" ]; then \
echo "/lib/modules/${kernel_version%%___*}/source"; \
else \
echo "/lib/modules/${kernel_version%%___*}/build"; \
fi)"
%endif
%endif
%if !%{defined kobj}
%if 0%{?rhel}%{?fedora}
%define kobj ${kernel_version##*___}
%else
%define kobj "$( \
if [ -e "/usr/src/linux-${kernel_version%%___*}" ]; then \
echo "/usr/src/linux-${kernel_version%%___*}"; \
else \
echo "/lib/modules/${kernel_version%%___*}/build"; \
fi)"
%endif
%endif
#define repo rpmfusion
#define repo chaos
# (un)define the next line to either build for the newest or all current kernels
%define buildforkernels newest
#define buildforkernels current
#define buildforkernels akmod
%bcond_with debug
%bcond_with debuginfo
Name: %{module}-kmod
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Kernel module(s)
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
Source0: %{module}-%{version}.tar.gz
Source10: kmodtool
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id} -u -n)
%if 0%{?rhel}%{?fedora}
BuildRequires: gcc, make
BuildRequires: elfutils-libelf-devel
%endif
%if 0%{?rhel}%{?fedora}%{?suse_version}
# We don't directly use it, but if this isn't installed, rpmbuild as root can
# crash+corrupt rpmdb
# See issue #12071
BuildRequires: ncompress
%endif
# The development headers will conflict with the dkms packages.
Conflicts: %{module}-dkms
%if %{defined repo}
# When building for a repository, use the proper build-sysbuild package
# to determine which kernel-devel packages should be installed.
BuildRequires: %{_bindir}/kmodtool
%{!?kernels:BuildRequires: buildsys-build-%{repo}-kerneldevpkgs-%{?buildforkernels:%{buildforkernels}}%{!?buildforkernels:current}-%{_target_cpu}}
%else
# When building local packages, attempt to use the installed kernel.
%{?rhel:BuildRequires: kernel-devel}
%{?fedora:BuildRequires: kernel-devel}
%{?suse_version:BuildRequires: kernel-source}
%if !%{defined kernels} && !%{defined build_src_rpm}
%if 0%{?rhel}%{?fedora}%{?suse_version}
%define kernels %(ls -1 /usr/src/kernels)
%else
%define kernels %(ls -1 /lib/modules)
%endif
%endif
%endif
# LDFLAGS are not sanitized by arch/*/Makefile for these architectures.
%ifarch ppc ppc64 ppc64le aarch64
%global __global_ldflags %{nil}
%endif
# Kmodtool does its magic here. A patched version of kmodtool is shipped
# with the source rpm until kmod development packages are supported upstream.
# https://bugzilla.rpmfusion.org/show_bug.cgi?id=2714
%{expand:%(bash %{SOURCE10} --target %{_target_cpu} %{?repo:--repo %{?repo}} --kmodname %{name} %{?buildforkernels:--%{buildforkernels}} --devel %{?prefix:--prefix "%{?prefix}"} %{?kernels:--for-kernels "%{?kernels}"} %{?kernelbuildroot:--buildroot "%{?kernelbuildroot}"} --obsolete-name spl --obsolete-version 0.8 2>/dev/null) }
%description
This package contains the ZFS kernel modules.
%prep
# Error out if there was something wrong with kmodtool.
%{?kmodtool_check}
# Print kmodtool output for debugging purposes:
bash %{SOURCE10} --target %{_target_cpu} %{?repo:--repo %{?repo}} --kmodname %{name} %{?buildforkernels:--%{buildforkernels}} --devel %{?prefix:--prefix "%{?prefix}"} %{?kernels:--for-kernels "%{?kernels}"} %{?kernelbuildroot:--buildroot "%{?kernelbuildroot}"} --obsolete-name spl --obsolete-version 0.8 2>/dev/null
%if %{with debug}
%define debug --enable-debug
%else
%define debug --disable-debug
%endif
%if %{with debuginfo}
%define debuginfo --enable-debuginfo
%else
%define debuginfo --disable-debuginfo
%endif
# Leverage VPATH from configure to avoid making multiple copies.
%define _configure ../%{module}-%{version}/configure
%setup -q -c -T -a 0
for kernel_version in %{?kernel_versions}; do
%{__mkdir} _kmod_build_${kernel_version%%___*}
done
%build
for kernel_version in %{?kernel_versions}; do
cd _kmod_build_${kernel_version%%___*}
%configure \
--with-config=kernel \
--with-linux=%{ksrc} \
--with-linux-obj=%{kobj} \
%{debug} \
- %{debuginfo}
+ %{debuginfo} \
+ %{?kernel_cc} \
+ %{?kernel_ld} \
+ %{?kernel_llvm}
make %{?_smp_mflags}
cd ..
done
%install
rm -rf ${RPM_BUILD_ROOT}
# Relies on the kernel 'modules_install' make target.
for kernel_version in %{?kernel_versions}; do
cd _kmod_build_${kernel_version%%___*}
make install \
DESTDIR=${RPM_BUILD_ROOT} \
%{?prefix:INSTALL_MOD_PATH=%{?prefix}} \
INSTALL_MOD_DIR=%{kmodinstdir_postfix}
cd ..
done
# find-debuginfo.sh only considers executables
chmod u+x ${RPM_BUILD_ROOT}%{kmodinstdir_prefix}/*/extra/*/*/*
%{?akmod_install}
%clean
rm -rf $RPM_BUILD_ROOT
diff --git a/sys/contrib/openzfs/rpm/generic/zfs.spec.in b/sys/contrib/openzfs/rpm/generic/zfs.spec.in
index 4a37ae8ce1d5..0e9cdcc31f32 100644
--- a/sys/contrib/openzfs/rpm/generic/zfs.spec.in
+++ b/sys/contrib/openzfs/rpm/generic/zfs.spec.in
@@ -1,567 +1,574 @@
%global _sbindir /sbin
%global _libdir /%{_lib}
# Set the default udev directory based on distribution.
%if %{undefined _udevdir}
%if 0%{?fedora} >= 17 || 0%{?rhel} >= 7 || 0%{?centos} >= 7
%global _udevdir %{_prefix}/lib/udev
%else
%global _udevdir /lib/udev
%endif
%endif
# Set the default udevrule directory based on distribution.
%if %{undefined _udevruledir}
%if 0%{?fedora} >= 17 || 0%{?rhel} >= 7 || 0%{?centos} >= 7
%global _udevruledir %{_prefix}/lib/udev/rules.d
%else
%global _udevruledir /lib/udev/rules.d
%endif
%endif
# Set the default dracut directory based on distribution.
%if %{undefined _dracutdir}
%if 0%{?fedora} >= 17 || 0%{?rhel} >= 7 || 0%{?centos} >= 7
%global _dracutdir %{_prefix}/lib/dracut
%else
%global _dracutdir %{_prefix}/share/dracut
%endif
%endif
%if %{undefined _initconfdir}
%global _initconfdir /etc/sysconfig
%endif
%if %{undefined _unitdir}
%global _unitdir %{_prefix}/lib/systemd/system
%endif
%if %{undefined _presetdir}
%global _presetdir %{_prefix}/lib/systemd/system-preset
%endif
%if %{undefined _modulesloaddir}
%global _modulesloaddir %{_prefix}/lib/modules-load.d
%endif
%if %{undefined _systemdgeneratordir}
%global _systemdgeneratordir %{_prefix}/lib/systemd/system-generators
%endif
%if %{undefined _pkgconfigdir}
%global _pkgconfigdir %{_prefix}/%{_lib}/pkgconfig
%endif
%bcond_with debug
%bcond_with debuginfo
%bcond_with asan
%bcond_with systemd
%bcond_with pam
# Generic enable switch for systemd
%if %{with systemd}
%define _systemd 1
%endif
# RHEL >= 7 comes with systemd
%if 0%{?rhel} >= 7
%define _systemd 1
%endif
# Fedora >= 15 comes with systemd, but only >= 18 has
# the proper macros
%if 0%{?fedora} >= 18
%define _systemd 1
%endif
# opensuse >= 12.1 comes with systemd, but only >= 13.1
# has the proper macros
%if 0%{?suse_version} >= 1310
%define _systemd 1
%endif
# When not specified default to distribution provided version. This
# is normally Python 3, but for RHEL <= 7 only Python 2 is provided.
%if %{undefined __use_python}
%if 0%{?rhel} && 0%{?rhel} <= 7
%define __python /usr/bin/python2
%define __python_pkg_version 2
%define __python_cffi_pkg python-cffi
%define __python_setuptools_pkg python-setuptools
%else
%define __python /usr/bin/python3
%define __python_pkg_version 3
%define __python_cffi_pkg python3-cffi
%define __python_setuptools_pkg python3-setuptools
%endif
%else
%define __python %{__use_python}
%define __python_pkg_version %{__use_python_pkg_version}
%define __python_cffi_pkg python%{__python_pkg_version}-cffi
%define __python_setuptools_pkg python%{__python_pkg_version}-setuptools
%endif
%define __python_sitelib %(%{__python} -Esc "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
# By default python-pyzfs is enabled, with the exception of
# RHEL 6 which by default uses Python 2.6 which is too old.
%if 0%{?rhel} == 6
%bcond_with pyzfs
%else
%bcond_without pyzfs
%endif
Name: @PACKAGE@
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Commands to control the kernel modules and libraries
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
Source0: %{name}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
Requires: libzpool5 = %{version}
Requires: libnvpair3 = %{version}
Requires: libuutil3 = %{version}
Requires: libzfs5 = %{version}
Requires: %{name}-kmod = %{version}
Provides: %{name}-kmod-common = %{version}
Obsoletes: spl
# zfs-fuse provides the same commands and man pages that OpenZFS does.
# Renaming those on either side would conflict with all available documentation.
Conflicts: zfs-fuse
%if 0%{?rhel}%{?fedora}%{?suse_version}
BuildRequires: gcc, make
BuildRequires: zlib-devel
BuildRequires: libuuid-devel
BuildRequires: libblkid-devel
BuildRequires: libudev-devel
BuildRequires: libattr-devel
BuildRequires: openssl-devel
# We don't directly use it, but if this isn't installed, rpmbuild as root can
# crash+corrupt rpmdb
# See issue #12071
BuildRequires: ncompress
%if 0%{?fedora} >= 28 || 0%{?rhel} >= 8 || 0%{?centos} >= 8
BuildRequires: libtirpc-devel
%endif
+%if %{with pam}
+BuildRequires: pam-devel
+%endif
+
Requires: openssl
%if 0%{?_systemd}
BuildRequires: systemd
%endif
+
%endif
%if 0%{?_systemd}
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
%endif
# The zpool iostat/status -c scripts call some utilities like lsblk and iostat
Requires: util-linux
Requires: sysstat
%description
This package contains the core ZFS command line utilities.
%package -n libzpool5
Summary: Native ZFS pool library for Linux
Group: System Environment/Kernel
Obsoletes: libzpool2
Obsoletes: libzpool4
%description -n libzpool5
This package contains the zpool library, which provides support
for managing zpools
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libzpool5
%else
%post -n libzpool5 -p /sbin/ldconfig
%postun -n libzpool5 -p /sbin/ldconfig
%endif
%package -n libnvpair3
Summary: Solaris name-value library for Linux
Group: System Environment/Kernel
Obsoletes: libnvpair1
%description -n libnvpair3
This package contains routines for packing and unpacking name-value
pairs. This functionality is used to portably transport data across
process boundaries, between kernel and user space, and can be used
to write self-describing data structures on disk.
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libnvpair3
%else
%post -n libnvpair3 -p /sbin/ldconfig
%postun -n libnvpair3 -p /sbin/ldconfig
%endif
%package -n libuutil3
Summary: Solaris userland utility library for Linux
Group: System Environment/Kernel
Obsoletes: libuutil1
%description -n libuutil3
This library provides a variety of compatibility functions for OpenZFS:
* libspl: The Solaris Porting Layer userland library, which provides APIs
that make it possible to run Solaris user code in a Linux environment
with relatively minimal modification.
* libavl: The Adelson-Velskii Landis balanced binary tree manipulation
library.
* libefi: The Extensible Firmware Interface library for GUID disk
partitioning.
* libshare: NFS, SMB, and iSCSI service integration for ZFS.
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libuutil3
%else
%post -n libuutil3 -p /sbin/ldconfig
%postun -n libuutil3 -p /sbin/ldconfig
%endif
# The library version is encoded in the package name. When updating the
# version information it is important to add an obsoletes line below for
# the previous version of the package.
%package -n libzfs5
Summary: Native ZFS filesystem library for Linux
Group: System Environment/Kernel
Obsoletes: libzfs2
Obsoletes: libzfs4
%description -n libzfs5
This package provides support for managing ZFS filesystems
%if %{defined ldconfig_scriptlets}
%ldconfig_scriptlets -n libzfs5
%else
%post -n libzfs5 -p /sbin/ldconfig
%postun -n libzfs5 -p /sbin/ldconfig
%endif
%package -n libzfs5-devel
Summary: Development headers
Group: System Environment/Kernel
Requires: libzfs5 = %{version}
Requires: libzpool5 = %{version}
Requires: libnvpair3 = %{version}
Requires: libuutil3 = %{version}
Provides: libzpool5-devel
Provides: libnvpair3-devel
Provides: libuutil3-devel
Obsoletes: zfs-devel
Obsoletes: libzfs2-devel
Obsoletes: libzfs4-devel
%description -n libzfs5-devel
This package contains the header files needed for building additional
applications against the ZFS libraries.
%package test
Summary: Test infrastructure
Group: System Environment/Kernel
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: parted
Requires: lsscsi
Requires: mdadm
Requires: bc
Requires: ksh
Requires: fio
Requires: acl
Requires: sudo
Requires: sysstat
Requires: libaio
Requires: python%{__python_pkg_version}
%if 0%{?rhel}%{?fedora}%{?suse_version}
BuildRequires: libaio-devel
%endif
AutoReqProv: no
%description test
This package contains test infrastructure and support scripts for
validating the file system.
%package dracut
Summary: Dracut module
Group: System Environment/Kernel
BuildArch: noarch
Requires: %{name} >= %{version}
Requires: dracut
Requires: /usr/bin/awk
Requires: grep
%description dracut
This package contains a dracut module used to construct an initramfs
image which is ZFS aware.
%if %{with pyzfs}
%package -n python%{__python_pkg_version}-pyzfs
Summary: Python %{python_version} wrapper for libzfs_core
Group: Development/Languages/Python
License: Apache-2.0
BuildArch: noarch
Requires: libzfs5 = %{version}
Requires: libnvpair3 = %{version}
Requires: libffi
Requires: python%{__python_pkg_version}
Requires: %{__python_cffi_pkg}
%if 0%{?rhel}%{?fedora}%{?suse_version}
%if 0%{?rhel} >= 8 || 0%{?centos} >= 8 || 0%{?fedora} >= 28
BuildRequires: python3-packaging
%else
BuildRequires: python-packaging
%endif
BuildRequires: python%{__python_pkg_version}-devel
BuildRequires: %{__python_cffi_pkg}
BuildRequires: %{__python_setuptools_pkg}
BuildRequires: libffi-devel
%endif
%description -n python%{__python_pkg_version}-pyzfs
This package provides a python wrapper for the libzfs_core C library.
%endif
%if 0%{?_initramfs}
%package initramfs
Summary: Initramfs module
Group: System Environment/Kernel
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name} = %{version}-%{release}
Requires: initramfs-tools
%description initramfs
This package contains an initramfs module used to construct an initramfs
image which is ZFS aware.
%endif
%prep
%if %{with debug}
%define debug --enable-debug
%else
%define debug --disable-debug
%endif
%if %{with debuginfo}
%define debuginfo --enable-debuginfo
%else
%define debuginfo --disable-debuginfo
%endif
%if %{with asan}
%define asan --enable-asan
%else
%define asan --disable-asan
%endif
%if 0%{?_systemd}
%define systemd --enable-systemd --with-systemdunitdir=%{_unitdir} --with-systemdpresetdir=%{_presetdir} --with-systemdmodulesloaddir=%{_modulesloaddir} --with-systemdgeneratordir=%{_systemdgeneratordir} --disable-sysvinit
%define systemd_svcs zfs-import-cache.service zfs-import-scan.service zfs-mount.service zfs-share.service zfs-zed.service zfs.target zfs-import.target zfs-volume-wait.service zfs-volumes.target
%else
%define systemd --enable-sysvinit --disable-systemd
%endif
%if %{with pyzfs}
%define pyzfs --enable-pyzfs
%else
%define pyzfs --disable-pyzfs
%endif
%if %{with pam}
%define pam --enable-pam
%else
%define pam --disable-pam
%endif
%setup -q
%build
%configure \
--with-config=user \
--with-udevdir=%{_udevdir} \
--with-udevruledir=%{_udevruledir} \
--with-dracutdir=%{_dracutdir} \
--with-pamconfigsdir=%{_datadir}/pam-configs \
--with-pammoduledir=%{_libdir}/security \
--with-python=%{__python} \
--with-pkgconfigdir=%{_pkgconfigdir} \
--disable-static \
%{debug} \
%{debuginfo} \
%{asan} \
%{systemd} \
%{pam} \
%{pyzfs}
make %{?_smp_mflags}
%install
%{__rm} -rf $RPM_BUILD_ROOT
make install DESTDIR=%{?buildroot}
find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \;
%if 0%{!?__brp_mangle_shebangs:1}
find %{?buildroot}%{_bindir} \
\( -name arc_summary -or -name arcstat -or -name dbufstat \) \
-exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \;
find %{?buildroot}%{_datadir} \
\( -name test-runner.py -or -name zts-report.py \) \
-exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \;
%endif
%post
%if 0%{?_systemd}
%if 0%{?systemd_post:1}
%systemd_post %{systemd_svcs}
%else
if [ "$1" = "1" -o "$1" = "install" ] ; then
# Initial installation
systemctl preset %{systemd_svcs} >/dev/null || true
fi
%endif
%else
if [ -x /sbin/chkconfig ]; then
/sbin/chkconfig --add zfs-import
+ /sbin/chkconfig --add zfs-load-key
/sbin/chkconfig --add zfs-mount
/sbin/chkconfig --add zfs-share
/sbin/chkconfig --add zfs-zed
fi
%endif
exit 0
# On RHEL/CentOS 7 the static nodes aren't refreshed by default after
# installing a package. This is the default behavior for Fedora.
%posttrans
%if 0%{?rhel} == 7 || 0%{?centos} == 7
systemctl restart kmod-static-nodes
systemctl restart systemd-tmpfiles-setup-dev
udevadm trigger
%endif
%preun
%if 0%{?_systemd}
%if 0%{?systemd_preun:1}
%systemd_preun %{systemd_svcs}
%else
if [ "$1" = "0" -o "$1" = "remove" ] ; then
# Package removal, not upgrade
systemctl --no-reload disable %{systemd_svcs} >/dev/null || true
systemctl stop %{systemd_svcs} >/dev/null || true
fi
%endif
%else
if [ "$1" = "0" -o "$1" = "remove" ] && [ -x /sbin/chkconfig ]; then
/sbin/chkconfig --del zfs-import
+ /sbin/chkconfig --del zfs-load-key
/sbin/chkconfig --del zfs-mount
/sbin/chkconfig --del zfs-share
/sbin/chkconfig --del zfs-zed
fi
%endif
exit 0
%postun
%if 0%{?_systemd}
%if 0%{?systemd_postun:1}
%systemd_postun %{systemd_svcs}
%else
systemctl --system daemon-reload >/dev/null || true
%endif
%endif
%files
# Core utilities
%{_sbindir}/*
%{_bindir}/raidz_test
%{_sbindir}/zgenhostid
%{_bindir}/zvol_wait
# Optional Python 2/3 scripts
%{_bindir}/arc_summary
%{_bindir}/arcstat
%{_bindir}/dbufstat
# Man pages
%{_mandir}/man1/*
%{_mandir}/man4/*
%{_mandir}/man5/*
%{_mandir}/man7/*
%{_mandir}/man8/*
# Configuration files and scripts
%{_libexecdir}/%{name}
%{_udevdir}/vdev_id
%{_udevdir}/zvol_id
%{_udevdir}/rules.d/*
%{_datadir}/%{name}/compatibility.d
%if ! 0%{?_systemd} || 0%{?_initramfs}
# Files needed for sysvinit and initramfs-tools
%{_sysconfdir}/%{name}/zfs-functions
%config(noreplace) %{_initconfdir}/zfs
%else
%exclude %{_sysconfdir}/%{name}/zfs-functions
%exclude %{_initconfdir}/zfs
%endif
%if 0%{?_systemd}
%{_unitdir}/*
%{_presetdir}/*
%{_modulesloaddir}/*
%{_systemdgeneratordir}/*
%else
%config(noreplace) %{_sysconfdir}/init.d/*
%endif
%config(noreplace) %{_sysconfdir}/%{name}/zed.d/*
%config(noreplace) %{_sysconfdir}/%{name}/zpool.d/*
%config(noreplace) %{_sysconfdir}/%{name}/vdev_id.conf.*.example
%attr(440, root, root) %config(noreplace) %{_sysconfdir}/sudoers.d/*
%if %{with pam}
%{_libdir}/security/*
%{_datadir}/pam-configs/*
%endif
%files -n libzpool5
%{_libdir}/libzpool.so.*
%files -n libnvpair3
%{_libdir}/libnvpair.so.*
%files -n libuutil3
%{_libdir}/libuutil.so.*
%files -n libzfs5
%{_libdir}/libzfs*.so.*
%files -n libzfs5-devel
%{_pkgconfigdir}/libzfs.pc
%{_pkgconfigdir}/libzfsbootenv.pc
%{_pkgconfigdir}/libzfs_core.pc
%{_libdir}/*.so
%{_includedir}/*
%doc AUTHORS COPYRIGHT LICENSE NOTICE README.md
%files test
%{_datadir}/%{name}/zfs-tests
%{_datadir}/%{name}/test-runner
%{_datadir}/%{name}/runfiles
%{_datadir}/%{name}/*.sh
%files dracut
%doc contrib/dracut/README.dracut.markdown
%{_dracutdir}/modules.d/*
%if %{with pyzfs}
%files -n python%{__python_pkg_version}-pyzfs
%doc contrib/pyzfs/README
%doc contrib/pyzfs/LICENSE
%defattr(-,root,root,-)
%{__python_sitelib}/libzfs_core/*
%{__python_sitelib}/pyzfs*
%endif
%if 0%{?_initramfs}
%files initramfs
%doc contrib/initramfs/README.initramfs.markdown
/usr/share/initramfs-tools/*
%else
# Since we're not building the initramfs package,
# ignore those files.
%exclude /usr/share/initramfs-tools
%endif
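The pam-devel build dependency added above is gated on the spec's existing "%bcond_with pam" switch, so PAM support (and the extra BuildRequires) stays off unless requested at build time. A minimal sketch of flipping that switch, assuming a locally built source RPM (the file name is illustrative):
    # PAM and debug are both %bcond_with, i.e. disabled by default;
    # --with pam enables the PAM module and pulls in pam-devel.
    rpmbuild --rebuild --with pam --with debug zfs-2.1.99-1.src.rpm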
diff --git a/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in b/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in
index eb93aeeb2efe..7b74fdc51f43 100644
--- a/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in
+++ b/sys/contrib/openzfs/rpm/redhat/zfs-kmod.spec.in
@@ -1,89 +1,92 @@
%bcond_with debug
%bcond_with debuginfo
Name: @PACKAGE@-kmod
Version: @VERSION@
Release: @RELEASE@%{?dist}
Summary: Kernel module(s)
Group: System Environment/Kernel
License: @ZFS_META_LICENSE@
URL: https://github.com/openzfs/zfs
BuildRequires: %kernel_module_package_buildreqs
Source0: @PACKAGE@-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
# Additional dependency information for the kmod sub-package must be specified
# by generating a preamble text file which kmodtool can append to the spec file.
%(/bin/echo -e "\
Requires: @PACKAGE@ = %{version}\n\
Conflicts: @PACKAGE@-dkms\n\
Obsoletes: kmod-spl\n\
Obsoletes: spl-kmod\n\n" > %{_sourcedir}/kmod-preamble)
# LDFLAGS are not sanitized by arch/*/Makefile for these architectures.
%ifarch ppc ppc64 ppc64le aarch64
%global __global_ldflags %{nil}
%endif
%description
This package contains the ZFS kernel modules.
%define kmod_name @PACKAGE@
%kernel_module_package -n %{kmod_name} -p %{_sourcedir}/kmod-preamble
%define ksrc %{_usrsrc}/kernels/%{kverrel}
%define kobj %{ksrc}
%package -n kmod-%{kmod_name}-devel
Summary: ZFS kernel module(s) devel common
Group: System Environment/Kernel
Provides: kmod-spl-devel = %{version}
%description -n kmod-%{kmod_name}-devel
This package provides the header files and objects to build kernel modules.
%prep
if ! [ -d "%{ksrc}" ]; then
echo "Kernel build directory isn't set properly, cannot continue"
exit 1
fi
%if %{with debug}
%define debug --enable-debug
%else
%define debug --disable-debug
%endif
%if %{with debuginfo}
%define debuginfo --enable-debuginfo
%else
%define debuginfo --disable-debuginfo
%endif
%setup -n %{kmod_name}-%{version}
%build
%configure \
--with-config=kernel \
--with-linux=%{ksrc} \
--with-linux-obj=%{kobj} \
%{debug} \
- %{debuginfo}
+ %{debuginfo} \
+ %{?kernel_cc} \
+ %{?kernel_ld} \
+ %{?kernel_llvm}
make %{?_smp_mflags}
%install
make install \
DESTDIR=${RPM_BUILD_ROOT} \
INSTALL_MOD_DIR=extra/%{kmod_name}
%{__rm} -f %{buildroot}/lib/modules/%{kverrel}/modules.*
# find-debuginfo.sh only considers executables
%{__chmod} u+x %{buildroot}/lib/modules/%{kverrel}/extra/*/*/*
%clean
rm -rf $RPM_BUILD_ROOT
%files -n kmod-%{kmod_name}-devel
%{_usrsrc}/%{kmod_name}-%{version}
%{_usrsrc}/spl-%{version}
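The new %{?kernel_cc}, %{?kernel_ld} and %{?kernel_llvm} configure arguments rely on RPM's conditional macro expansion: %{?name} expands to the macro's value when it is defined (for example on LLVM/clang-built kernels) and to nothing otherwise, leaving ordinary GCC builds unchanged. A hedged illustration of the expansion rule; the macro value below is made up:
    # With kernel_cc defined the flag appears; undefined macros vanish.
    rpm --define 'kernel_cc CC=clang' --eval 'configure %{?kernel_cc} %{?kernel_ld}'
    # -> configure CC=clang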
diff --git a/sys/contrib/openzfs/scripts/dkms.mkconf b/sys/contrib/openzfs/scripts/dkms.mkconf
index 9d12a8c3b3fd..408322fa124a 100755
--- a/sys/contrib/openzfs/scripts/dkms.mkconf
+++ b/sys/contrib/openzfs/scripts/dkms.mkconf
@@ -1,123 +1,115 @@
#!/bin/sh
PROG=$0
pkgcfg=/etc/sysconfig/zfs
while getopts "n:v:c:f:" opt; do
case $opt in
n) pkgname=$OPTARG ;;
v) pkgver=$OPTARG ;;
c) pkgcfg=$OPTARG ;;
f) filename=$OPTARG ;;
*) err=1 ;;
esac
done
if [ -z "${pkgname}" ] || [ -z "${pkgver}" ] || [ -z "${filename}" ] ||
[ -n "${err}" ]; then
echo "Usage: $PROG -n <pkgname> -v <pkgver> -c <pkgcfg> -f <filename>"
exit 1
fi
exec cat >"${filename}" <<EOF
PACKAGE_NAME="${pkgname}"
PACKAGE_VERSION="${pkgver}"
PACKAGE_CONFIG="${pkgcfg}"
NO_WEAK_MODULES="yes"
PRE_BUILD="configure
--prefix=/usr
--with-config=kernel
--with-linux=\$(
- case \`lsb_release -is\` in
- (Debian|Devuan)
- if [[ -e \${kernel_source_dir/%build/source} ]]
- then
- echo \${kernel_source_dir/%build/source}
- else
- # A kpkg exception for Proxmox 2.0
- echo \${kernel_source_dir}
- fi
- ;;
- (*)
- echo \${kernel_source_dir}
- ;;
- esac
+ if [ -e "\${kernel_source_dir/%build/source}" ]
+ then
+ echo "\${kernel_source_dir/%build/source}"
+ else
+ echo "\${kernel_source_dir}"
+ fi
)
- --with-linux-obj=\${kernel_source_dir}
+ --with-linux-obj="\${kernel_source_dir}"
\$(
[[ -n \"\${ICP_ROOT}\" ]] && \\
{
echo --with-qat=\"\${ICP_ROOT}\"
}
)
\$(
[[ -r \${PACKAGE_CONFIG} ]] \\
&& source \${PACKAGE_CONFIG} \\
&& shopt -q -s extglob \\
&& \\
{
if [[ \${ZFS_DKMS_ENABLE_DEBUG,,} == @(y|yes) ]]
then
echo --enable-debug
fi
if [[ \${ZFS_DKMS_ENABLE_DEBUGINFO,,} == @(y|yes) ]]
then
echo --enable-debuginfo
fi
}
)
"
POST_BUILD="scripts/dkms.postbuild
-n \${PACKAGE_NAME}
-v \${PACKAGE_VERSION}
-a \${arch}
-k \${kernelver}
-t \${dkms_tree}
"
AUTOINSTALL="yes"
REMAKE_INITRD="no"
MAKE[0]="make"
STRIP[0]="\$(
[[ -r \${PACKAGE_CONFIG} ]] \\
&& source \${PACKAGE_CONFIG} \\
&& shopt -q -s extglob \\
&& [[ \${ZFS_DKMS_DISABLE_STRIP,,} == @(y|yes) ]] \\
&& echo -n no
)"
STRIP[1]="\${STRIP[0]}"
STRIP[2]="\${STRIP[0]}"
STRIP[3]="\${STRIP[0]}"
STRIP[4]="\${STRIP[0]}"
STRIP[5]="\${STRIP[0]}"
STRIP[6]="\${STRIP[0]}"
STRIP[7]="\${STRIP[0]}"
STRIP[8]="\${STRIP[0]}"
BUILT_MODULE_NAME[0]="zavl"
BUILT_MODULE_LOCATION[0]="module/avl/"
DEST_MODULE_LOCATION[0]="/extra/avl/avl"
BUILT_MODULE_NAME[1]="znvpair"
BUILT_MODULE_LOCATION[1]="module/nvpair/"
DEST_MODULE_LOCATION[1]="/extra/nvpair/znvpair"
BUILT_MODULE_NAME[2]="zunicode"
BUILT_MODULE_LOCATION[2]="module/unicode/"
DEST_MODULE_LOCATION[2]="/extra/unicode/zunicode"
BUILT_MODULE_NAME[3]="zcommon"
BUILT_MODULE_LOCATION[3]="module/zcommon/"
DEST_MODULE_LOCATION[3]="/extra/zcommon/zcommon"
BUILT_MODULE_NAME[4]="zfs"
BUILT_MODULE_LOCATION[4]="module/zfs/"
DEST_MODULE_LOCATION[4]="/extra/zfs/zfs"
BUILT_MODULE_NAME[5]="icp"
BUILT_MODULE_LOCATION[5]="module/icp/"
DEST_MODULE_LOCATION[5]="/extra/icp/icp"
BUILT_MODULE_NAME[6]="zlua"
BUILT_MODULE_LOCATION[6]="module/lua/"
DEST_MODULE_LOCATION[6]="/extra/lua/zlua"
BUILT_MODULE_NAME[7]="spl"
BUILT_MODULE_LOCATION[7]="module/spl/"
DEST_MODULE_LOCATION[7]="/extra/spl/spl"
BUILT_MODULE_NAME[8]="zzstd"
BUILT_MODULE_LOCATION[8]="module/zstd/"
DEST_MODULE_LOCATION[8]="/extra/zstd/zzstd"
EOF
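The rewritten PRE_BUILD snippet above replaces the lsb_release-based case statement with a single existence test on ${kernel_source_dir/%build/source}, i.e. the headers path with a trailing "build" component rewritten to "source". A small sketch of that expansion as evaluated by dkms (which runs the generated snippet under bash); the path is hypothetical:
    kernel_source_dir=/lib/modules/5.15.0-50-generic/build   # hypothetical
    echo "${kernel_source_dir/%build/source}"
    # -> /lib/modules/5.15.0-50-generic/source, used only if that directory exists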
diff --git a/sys/contrib/openzfs/scripts/kmodtool b/sys/contrib/openzfs/scripts/kmodtool
index 26bacf5991d2..b1021596997e 100755
--- a/sys/contrib/openzfs/scripts/kmodtool
+++ b/sys/contrib/openzfs/scripts/kmodtool
@@ -1,625 +1,625 @@
#!/usr/bin/env bash
# shellcheck disable=SC2086
# kmodtool - Helper script for building kernel module RPMs
# Copyright (c) 2003-2012 Ville Skyttä <ville.skytta@iki.fi>,
# Thorsten Leemhuis <fedora@leemhuis.info>
# Nicolas Chauvet <kwizart@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
shopt -s extglob
myprog="kmodtool-${repo}"
myver="0.12.1"
kmodname=
build_kernels="current"
kernels_known_variants=
kernel_versions=
kernel_versions_to_build_for=
prefix=
filterfile=
target=
buildroot=
dashvariant=
error_out()
{
local errorlevel=${1}
shift
echo "Error: $*" >&2
# the next line is not multi-line safe -- not needed *yet*
echo "%global kmodtool_check echo \"kmodtool error: $*\"; exit ${errorlevel};"
exit "${errorlevel}"
}
print_rpmtemplate_header()
{
echo
echo '%global kmodinstdir_prefix '${prefix}/lib/modules/
echo '%global kmodinstdir_postfix '/extra/${kmodname}/
echo '%global kernel_versions '${kernel_versions}
echo
}
print_akmodtemplate ()
{
echo
cat <<EOF
%global akmod_install mkdir -p \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/; \\\
LANG=C rpmbuild --define "_sourcedir %{_sourcedir}" \\\
--define "_srcrpmdir \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/" \\\
-bs --nodeps %{_specdir}/%{name}.spec ; \\\
ln -s \$(ls \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/) \$RPM_BUILD_ROOT/%{_usrsrc}/akmods/${kmodname}-kmod.latest
%package -n akmod-${kmodname}
Summary: Akmod package for ${kmodname} kernel module(s)
Group: System Environment/Kernel
Requires: kmodtool
Requires: akmods
%{?AkmodsBuildRequires:Requires: %{AkmodsBuildRequires}}
# same requires and provides as a kmods package would have
Requires: ${kmodname}-kmod-common >= %{?epoch:%{epoch}:}%{version}
Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
EOF
if [[ ${obsolete_name} ]]; then
echo "Provides: akmod-${obsolete_name} = ${obsolete_version}"
echo "Obsoletes: akmod-${obsolete_name} < ${obsolete_version}"
fi
cat <<EOF
%description -n akmod-${kmodname}
This package provides the akmod package for the ${kmodname} kernel modules.
%posttrans -n akmod-${kmodname}
nohup ${prefix}/sbin/akmods --from-akmod-posttrans --akmod ${kmodname} &> /dev/null &
%files -n akmod-${kmodname}
%defattr(-,root,root,-)
%{_usrsrc}/akmods/*
EOF
}
print_akmodmeta ()
{
cat <<EOF
%package -n kmod-${kmodname}
Summary: Metapackage which tracks in ${kmodname} kernel module for newest kernel${dashvariant}
Group: System Environment/Kernel
Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
Provides: kmod-${kmodname}-xen = %{?epoch:%{epoch}:}%{version}-%{release}
Provides: kmod-${kmodname}-smp = %{?epoch:%{epoch}:}%{version}-%{release}
Provides: kmod-${kmodname}-PAE = %{?epoch:%{epoch}:}%{version}-%{release}
Requires: akmod-${kmodname} = %{?epoch:%{epoch}:}%{version}-%{release}
EOF
if [[ ${obsolete_name} ]]; then
echo "Provides: kmod-${obsolete_name} = ${obsolete_version}"
echo "Obsoletes: kmod-${obsolete_name} < ${obsolete_version}"
fi
cat <<EOF
%description -n kmod-${kmodname}${dashvariant}
This is a meta-package without payload whose sole purpose is to require the
${kmodname} kernel module(s) for the newest kernel${dashvariant},
to make sure you get it together with a new kernel.
%files -n kmod-${kmodname}${dashvariant}
%defattr(644,root,root,755)
EOF
}
print_rpmtemplate_per_kmodpkg ()
{
if [[ "${1}" == "--custom" ]]; then
shift
local customkernel=true
elif [[ "${1}" == "--redhat" ]]; then
# this is needed for akmods
shift
local redhatkernel=true
fi
local kernel_uname_r=${1}
local kernel_variant="${2:+-${2}}"
# Detect depmod install location
local depmod_path=/sbin/depmod
if [ ! -f ${depmod_path} ]; then
depmod_path=/usr/sbin/depmod
fi
# first part
cat <<EOF
%package -n kmod-${kmodname}-${kernel_uname_r}
Summary: ${kmodname} kernel module(s) for ${kernel_uname_r}
Group: System Environment/Kernel
Provides: kernel-modules-for-kernel = ${kernel_uname_r}
Provides: kmod-${kmodname}-uname-r = ${kernel_uname_r}
Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
Requires: ${kmodname}-kmod-common >= %{?epoch:%{epoch}:}%{version}
%if 0%{?rhel} == 6 || 0%{?centos} == 6
Requires(post): module-init-tools
Requires(postun): module-init-tools
%else
Requires(post): kmod
Requires(postun): kmod
%endif
EOF
if [[ ${obsolete_name} ]]; then
echo "Provides: kmod-${obsolete_name}-${kernel_uname_r} = ${obsolete_version}"
echo "Obsoletes: kmod-${obsolete_name}-${kernel_uname_r} < ${obsolete_version}"
fi
# second part
if [[ ! "${customkernel}" ]]; then
cat <<EOF
Requires: kernel-uname-r = ${kernel_uname_r}
BuildRequires: kernel-devel-uname-r = ${kernel_uname_r}
%{?KmodsRequires:Requires: %{KmodsRequires}-uname-r = ${kernel_uname_r}}
%{?KmodsRequires:BuildRequires: %{KmodsRequires}-uname-r = ${kernel_uname_r}}
%post -n kmod-${kmodname}-${kernel_uname_r}
if [[ -f "/boot/System.map-${kernel_uname_r}" ]]; then
${prefix}${depmod_path} -aeF /boot/System.map-${kernel_uname_r} ${kernel_uname_r} > /dev/null || :
elif [[ -f "/lib/modules/${kernel_uname_r}/System.map" ]]; then
${prefix}${depmod_path} -aeF /lib/modules/${kernel_uname_r}/System.map ${kernel_uname_r} > /dev/null || :
else
${prefix}${depmod_path} -ae ${kernel_uname_r} &> /dev/null || :
fi
%postun -n kmod-${kmodname}-${kernel_uname_r}
if [[ -f "/boot/System.map-${kernel_uname_r}" ]]; then
${prefix}${depmod_path} -aF /boot/System.map-${kernel_uname_r} ${kernel_uname_r} &> /dev/null || :
elif [[ -f "/lib/modules/${kernel_uname_r}/System.map" ]]; then
${prefix}${depmod_path} -aF /lib/modules/${kernel_uname_r}/System.map ${kernel_uname_r} &> /dev/null || :
else
${prefix}${depmod_path} -a ${kernel_uname_r} &> /dev/null || :
fi
EOF
else
cat <<EOF
%post -n kmod-${kmodname}-${kernel_uname_r}
[[ "\$(uname -r)" == "${kernel_uname_r}" ]] && ${prefix}${depmod_path} -a > /dev/null || :
%postun -n kmod-${kmodname}-${kernel_uname_r}
[[ "\$(uname -r)" == "${kernel_uname_r}" ]] && ${prefix}${depmod_path} -a > /dev/null || :
EOF
fi
# third part
cat <<EOF
%description -n kmod-${kmodname}-${kernel_uname_r}
This package provides the ${kmodname} kernel modules built for the Linux
kernel ${kernel_uname_r} for the %{_target_cpu} family of processors.
%files -n kmod-${kmodname}-${kernel_uname_r}
%defattr(644,root,root,755)
%dir $prefix/lib/modules/${kernel_uname_r}/extra
${prefix}/lib/modules/${kernel_uname_r}/extra/${kmodname}/
EOF
}
print_rpmtemplate_kmoddevelpkg ()
{
if [[ "${1}" == "--custom" ]]; then
shift
local customkernel=true
elif [[ "${1}" == "--redhat" ]]; then
shift
local redhatkernel=true
fi
local kernel_uname_r=${1}
cat <<EOF
%package -n kmod-${kmodname}-devel
Summary: ${kmodname} kernel module(s) devel common
Group: System Environment/Kernel
Provides: ${kmodname}-devel-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
EOF
if [[ ! ${customkernel} ]] && [[ ! ${redhatkernel} ]]; then
echo "Requires: kmod-${kmodname}-devel-${kernel_uname_r} >= %{?epoch:%{epoch}:}%{version}-%{release}"
fi
if [[ ${obsolete_name} ]]; then
echo "Provides: kmod-${obsolete_name}-devel = ${obsolete_version}"
echo "Obsoletes: kmod-${obsolete_name}-devel < ${obsolete_version}"
fi
cat <<EOF
%description -n kmod-${kmodname}-devel
This package provides the common header files to build kernel modules
which depend on the ${kmodname} kernel module. It may optionally require
the ${kmodname}-devel-<kernel> objects for the newest kernel.
%files -n kmod-${kmodname}-devel
%defattr(644,root,root,755)
%{_usrsrc}/${kmodname}-%{version}
EOF
if [[ ${obsolete_name} ]]; then
echo "%{_usrsrc}/${obsolete_name}-%{version}"
fi
for kernel in ${1}; do
local kernel_uname_r=${kernel}
echo "%exclude %{_usrsrc}/${kmodname}-%{version}/${kernel_uname_r}"
if [[ ${obsolete_name} ]]; then
echo "%exclude %{_usrsrc}/${obsolete_name}-%{version}/${kernel_uname_r}"
fi
done
echo
echo
}
print_rpmtemplate_per_kmoddevelpkg ()
{
if [[ "${1}" == "--custom" ]]; then
shift
local customkernel=true
elif [[ "${1}" == "--redhat" ]]; then
# this is needed for akmods
shift
local redhatkernel=true
fi
local kernel_uname_r=${1}
local kernel_variant="${2:+-${2}}"
# first part
cat <<EOF
%package -n kmod-${kmodname}-devel-${kernel_uname_r}
Summary: ${kmodname} kernel module(s) devel for ${kernel_uname_r}
Group: System Environment/Kernel
Provides: kernel-objects-for-kernel = ${kernel_uname_r}
Provides: ${kmodname}-devel-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
Provides: kmod-${kmodname}-devel-uname-r = ${kernel_uname_r}
EOF
if [[ ${obsolete_name} ]]; then
echo "Provides: kmod-${obsolete_name}-devel-${kernel_uname_r} = ${obsolete_version}"
echo "Obsoletes: kmod-${obsolete_name}-devel-${kernel_uname_r} < ${obsolete_version}"
fi
# second part
if [[ ! "${customkernel}" ]]; then
cat <<EOF
Requires: kernel-devel-uname-r = ${kernel_uname_r}
BuildRequires: kernel-devel-uname-r = ${kernel_uname_r}
%{?KmodsDevelRequires:Requires: %{KmodsDevelRequires}-uname-r = ${kernel_uname_r}}
%{?KmodsDevelRequires:BuildRequires: %{KmodsDevelRequires}-uname-r = ${kernel_uname_r}}
EOF
fi
# third part
cat <<EOF
%description -n kmod-${kmodname}-devel-${kernel_uname_r}
This package provides objects and symbols required to build kernel modules
which depend on the ${kmodname} kernel modules built for the Linux
kernel ${kernel_uname_r} for the %{_target_cpu} family of processors.
%files -n kmod-${kmodname}-devel-${kernel_uname_r}
%defattr(644,root,root,755)
%{_usrsrc}/${kmodname}-%{version}/${kernel_uname_r}
EOF
if [[ ${obsolete_name} ]]; then
echo "%{_usrsrc}/${obsolete_name}-%{version}/${kernel_uname_r}"
fi
}
print_rpmtemplate_kmodmetapkg ()
{
local kernel_uname_r=${1}
local kernel_variant="${2:+-${2}}"
cat <<EOF
%package -n kmod-${kmodname}${kernel_variant}
Summary: Metapackage which tracks in ${kmodname} kernel module for newest kernel${kernel_variant}
Group: System Environment/Kernel
Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release}
Requires: kmod-${kmodname}-${kernel_uname_r} >= %{?epoch:%{epoch}:}%{version}-%{release}
%{?KmodsMetaRequires:Requires: %{?KmodsMetaRequires}}
EOF
if [[ ${obsolete_name} ]]; then
echo "Provides: kmod-${obsolete_name}${kernel_variant} = ${obsolete_version}"
echo "Obsoletes: kmod-${obsolete_name}${kernel_variant} < ${obsolete_version}"
fi
cat <<EOF
%description -n kmod-${kmodname}${kernel_variant}
This is a meta-package without payload whose sole purpose is to require the
${kmodname} kernel module(s) for the newest kernel${kernel_variant},
to make sure you get it together with a new kernel.
%files -n kmod-${kmodname}${kernel_variant}
%defattr(644,root,root,755)
EOF
}
print_customrpmtemplate ()
{
for kernel in ${1}
do
if [[ -e "${buildroot}/usr/src/kernels/${kernel}" ]] ; then
# this looks like a Fedora/RH kernel -- print a normal template (which includes the proper BR) and be happy :)
kernel_versions="${kernel_versions}${kernel}___${buildroot}%{_usrsrc}/kernels/${kernel} "
# parse kernel versions string and print template
local kernel_verrelarch=${kernel%%${kernels_known_variants}}
print_rpmtemplate_per_kmodpkg --redhat ${kernel} ${kernel##${kernel_verrelarch}}
# create development package
if [[ "${devel}" ]]; then
# create devel package including common headers
print_rpmtemplate_kmoddevelpkg --redhat ${kernel} ${kernel##${kernel_verrelarch}}
# create devel package
print_rpmtemplate_per_kmoddevelpkg --redhat ${kernel} ${kernel##${kernel_verrelarch}}
fi
elif [[ -e ${prefix}/lib/modules/"${kernel}"/build/Makefile ]] ; then
# likely a user-built kernel with available buildfiles
# fixme: we should check if uname from Makefile is the same as ${kernel}
kernel_versions="${kernel_versions}${kernel}___${prefix}/lib/modules/${kernel}/build/ "
print_rpmtemplate_per_kmodpkg --custom "${kernel}"
# create development package
if [[ "${devel}" ]]; then
# create devel package including common headers
print_rpmtemplate_kmoddevelpkg --custom "${kernel}"
# create devel package
print_rpmtemplate_per_kmoddevelpkg --custom "${kernel}"
fi
else
error_out 2 "Don't know how to handle ${kernel} -- ${prefix}/lib/modules/${kernel}/build/Makefile not found"
fi
done
# well, it's no header anymore, but who cares ;-)
print_rpmtemplate_header
}
print_rpmtemplate ()
{
# create kernel_versions var
for kernel_version in ${kernel_versions_to_build_for}
do
kernel_versions="${kernel_versions}${kernel_version}___%{_usrsrc}/kernels/${kernel_version} "
done
# and print it and some other required stuff as macro
print_rpmtemplate_header
# now print the packages
for kernel in ${kernel_versions_to_build_for} ; do
local kernel_verrelarch=${kernel%%${kernels_known_variants}}
# create metapackage
print_rpmtemplate_kmodmetapkg ${kernel} ${kernel##${kernel_verrelarch}}
# create package
print_rpmtemplate_per_kmodpkg ${kernel} ${kernel##${kernel_verrelarch}}
if [[ "${devel}" ]]; then
# create devel package including common headers
print_rpmtemplate_kmoddevelpkg ${kernel} ${kernel##${kernel_verrelarch}}
# create devel package
print_rpmtemplate_per_kmoddevelpkg ${kernel} ${kernel##${kernel_verrelarch}}
fi
done
}
myprog_help ()
{
- echo "Usage: $(basename ${0}) [OPTIONS]"
+ echo "Usage: ${0##*/} [OPTIONS]"
echo $'\n'"Creates a template to be used during kmod building"
echo $'\n'"Available options:"
echo " --filterfile <file> -- filter the results with grep --file <file>"
echo " --for-kernels <list> -- created templates only for these kernels"
echo " --kmodname <file> -- name of the kmod (required)"
echo " --devel -- make kmod-devel package"
echo " --noakmod -- no akmod package"
echo " --repo <name> -- use buildsys-build-<name>-kerneldevpkgs"
echo " --target <arch> -- target-arch (required)"
echo " --buildroot <dir> -- Build root (place to look for build files)"
}
while [ "${1}" ] ; do
case "${1}" in
--filterfile)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide path to a filter-file together with --filterfile" >&2
elif [[ ! -e "${1}" ]]; then
error_out 2 "Filterfile ${1} not found" >&2
fi
filterfile="${1}"
shift
;;
--kmodname)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide the name of the kmod together with --kmodname" >&2
fi
# strip trailing -kmod
kmodname="${1%%-kmod}"
shift
;;
--devel)
shift
devel="true"
;;
--prefix)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide a prefix with --prefix" >&2
fi
prefix="${1}"
shift
;;
--repo)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide the name of the repo together with --repo" >&2
fi
repo=${1}
shift
;;
--for-kernels)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide the name of the kmod together with --kmodname" >&2
fi
for_kernels="${1}"
shift
;;
--noakmod)
shift
noakmod="true"
;;
--obsolete-name)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide the name of the kmod to obsolete together with --obsolete-name" >&2
fi
obsolete_name="${1}"
shift
;;
--obsolete-version)
shift
if [[ ! "${1}" ]] ; then
error_out 2 "Please provide the version of the kmod to obsolete together with --obsolete-version" >&2
fi
obsolete_version="${1}"
shift
;;
--target)
shift
target="${1}"
shift
;;
--akmod)
shift
build_kernels="akmod"
;;
--newest)
shift
build_kernels="newest"
;;
--current)
shift
build_kernels="current"
;;
--buildroot)
shift
buildroot="${1}"
shift
;;
--help)
myprog_help
exit 0
;;
--version)
echo "${myprog} ${myver}"
exit 0
;;
*)
echo "Error: Unknown option '${1}'." >&2
myprog_help >&2
exit 2
;;
esac
done
if [[ -e ./kmodtool-kernel-variants ]]; then
kernels_known_variants="$(cat ./kmodtool-kernel-variants)"
elif [[ -e /usr/share/kmodtool/kernel-variants ]] ; then
kernels_known_variants="$(cat /usr/share/kmodtool/kernel-variants)"
else
kernels_known_variants="@(smp?(-debug)|PAE?(-debug)|debug|kdump|xen|kirkwood|highbank|imx|omap|tegra)"
fi
# general sanity checks
if [[ ! "${target}" ]]; then
error_out 2 "please pass target arch with --target"
elif [[ ! "${kmodname}" ]]; then
error_out 2 "please pass kmodname with --kmodname"
elif [[ ! "${kernels_known_variants}" ]] ; then
error_out 2 "could not determine known variants"
elif { [[ "${obsolete_name}" ]] && [[ ! "${obsolete_version}" ]]; } || { [[ ! "${obsolete_name}" ]] && [[ "${obsolete_version}" ]]; } ; then
error_out 2 "you need to provide both --obsolete-name and --obsolete-version"
fi
# go
if [[ "${for_kernels}" ]]; then
# this is easy:
print_customrpmtemplate "${for_kernels}"
elif [[ "${build_kernels}" == "akmod" ]]; then
# do only an akmod package
print_akmodtemplate
print_akmodmeta
else
# seems we are on our own to decide for which kernels to build
# we need more sanity checks in this case
if [[ ! "${repo}" ]]; then
error_out 2 "please provide repo name with --repo"
elif ! command -v "buildsys-build-${repo}-kerneldevpkgs" &> /dev/null ; then
error_out 2 "buildsys-build-${repo}-kerneldevpkgs not found"
fi
# call buildsys-build-${repo}-kerneldevpkgs to get the list of kernels
cmdoptions="--target ${target}"
# filterfile to filter list of kernels?
if [[ "${filterfile}" ]] ; then
cmdoptions="${cmdoptions} --filterfile ${filterfile}"
fi
kernel_versions_to_build_for="$(buildsys-build-${repo}-kerneldevpkgs --${build_kernels} ${cmdoptions})"
returncode=$?
if (( returncode != 0 )); then
error_out 2 "buildsys-build-${repo}-kerneldevpkgs failed: $(buildsys-build-${repo}-kerneldevpkgs --${build_kernels} ${cmdoptions})"
fi
if [[ "${build_kernels}" == "current" ]] && [[ ! "${noakmod}" ]]; then
print_akmodtemplate
fi
print_rpmtemplate
fi
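kmodtool repeatedly splits a kernel string into its version-release-arch part and an optional variant suffix using the extglob pattern stored in kernels_known_variants. A hedged sketch of that split; the kernel name and variant list are invented examples:
    shopt -s extglob
    kernels_known_variants="@(smp?(-debug)|PAE?(-debug)|debug|kdump|xen)"
    kernel="4.18.0-348.el8.x86_64+debug"                    # hypothetical uname -r
    kernel_verrelarch=${kernel%%${kernels_known_variants}}  # 4.18.0-348.el8.x86_64+
    echo "${kernel##${kernel_verrelarch}}"                  # variant: debug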
diff --git a/sys/contrib/openzfs/scripts/zfs-tests.sh b/sys/contrib/openzfs/scripts/zfs-tests.sh
index 60499e09e249..aa0829b28326 100755
--- a/sys/contrib/openzfs/scripts/zfs-tests.sh
+++ b/sys/contrib/openzfs/scripts/zfs-tests.sh
@@ -1,748 +1,750 @@
#!/bin/sh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
#
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zfs-tests.sh
VERBOSE="no"
QUIET=""
CLEANUP="yes"
CLEANUPALL="no"
LOOPBACK="yes"
STACK_TRACER="no"
FILESIZE="4G"
DEFAULT_RUNFILES="common.run,$(uname | tr '[:upper:]' '[:lower:]').run"
RUNFILES=${RUNFILES:-$DEFAULT_RUNFILES}
FILEDIR=${FILEDIR:-/var/tmp}
DISKS=${DISKS:-""}
SINGLETEST=""
SINGLETESTUSER="root"
TAGS=""
ITERATIONS=1
ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
UNAME=$(uname -s)
RERUN=""
# Override some defaults if on FreeBSD
if [ "$UNAME" = "FreeBSD" ] ; then
TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DMESG"}
LOSETUP=/sbin/mdconfig
DMSETUP=/sbin/gpart
else
ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh"
TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"}
LOSETUP=${LOSETUP:-/sbin/losetup}
DMSETUP=${DMSETUP:-/sbin/dmsetup}
fi
#
# Log an informational message when additional verbosity is enabled.
#
msg() {
if [ "$VERBOSE" = "yes" ]; then
echo "$@"
fi
}
#
# Log a failure message, cleanup, and return an error.
#
fail() {
echo "$PROG: $1" >&2
cleanup
exit 1
}
cleanup_freebsd_loopback() {
for TEST_LOOPBACK in ${LOOPBACKS}; do
if [ -c "/dev/${TEST_LOOPBACK}" ]; then
sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" ||
echo "Failed to destroy: ${TEST_LOOPBACK}"
fi
done
}
cleanup_linux_loopback() {
for TEST_LOOPBACK in ${LOOPBACKS}; do
- LOOP_DEV=$(basename "$TEST_LOOPBACK")
+ LOOP_DEV="${TEST_LOOPBACK##*/}"
DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \
grep "${LOOP_DEV}" | cut -f1)
if [ -n "$DM_DEV" ]; then
sudo "${DMSETUP}" remove "${DM_DEV}" ||
echo "Failed to remove: ${DM_DEV}"
fi
if [ -n "${TEST_LOOPBACK}" ]; then
sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" ||
echo "Failed to remove: ${TEST_LOOPBACK}"
fi
done
}
#
# Attempt to remove loopback devices and files which were created earlier
# by this script to run the test framework. The '-k' option may be passed
# to the script to suppress cleanup for debugging purposes.
#
cleanup() {
if [ "$CLEANUP" = "no" ]; then
return 0
fi
if [ "$LOOPBACK" = "yes" ]; then
if [ "$UNAME" = "FreeBSD" ] ; then
cleanup_freebsd_loopback
else
cleanup_linux_loopback
fi
fi
for TEST_FILE in ${FILES}; do
rm -f "${TEST_FILE}" >/dev/null 2>&1
done
if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then
rm -Rf "$STF_PATH"
fi
}
trap cleanup EXIT
#
# Attempt to remove all testpools (testpool.XXX), unopened dm devices,
# loopback devices, and files. This is a useful way to clean up after a previous
# test run failure which has left the system in an unknown state. This can
# be dangerous and should only be used in a dedicated test environment.
#
cleanup_all() {
TEST_POOLS=$(sudo "$ZPOOL" list -H -o name | grep testpool)
if [ "$UNAME" = "FreeBSD" ] ; then
TEST_LOOPBACKS=$(sudo "${LOSETUP}" -l)
else
TEST_LOOPBACKS=$(sudo "${LOSETUP}" -a|grep file-vdev|cut -f1 -d:)
fi
TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null)
msg
msg "--- Cleanup ---"
msg "Removing pool(s): $(echo "${TEST_POOLS}" | tr '\n' ' ')"
for TEST_POOL in $TEST_POOLS; do
sudo "$ZPOOL" destroy "${TEST_POOL}"
done
if [ "$UNAME" != "FreeBSD" ] ; then
msg "Removing dm(s): $(sudo "${DMSETUP}" ls |
grep loop | tr '\n' ' ')"
sudo "${DMSETUP}" remove_all
fi
msg "Removing loopback(s): $(echo "${TEST_LOOPBACKS}" | tr '\n' ' ')"
for TEST_LOOPBACK in $TEST_LOOPBACKS; do
if [ "$UNAME" = "FreeBSD" ] ; then
sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}"
else
sudo "${LOSETUP}" -d "${TEST_LOOPBACK}"
fi
done
msg "Removing files(s): $(echo "${TEST_FILES}" | tr '\n' ' ')"
for TEST_FILE in $TEST_FILES; do
sudo rm -f "${TEST_FILE}"
done
}
#
# Takes a name as its only argument and looks for the following variations
# on that name. If one is found it is returned.
#
# $RUNFILE_DIR/<name>
# $RUNFILE_DIR/<name>.run
# <name>
# <name>.run
#
find_runfile() {
NAME=$1
RESULT=""
if [ -f "$RUNFILE_DIR/$NAME" ]; then
RESULT="$RUNFILE_DIR/$NAME"
elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then
RESULT="$RUNFILE_DIR/$NAME.run"
elif [ -f "$NAME" ]; then
RESULT="$NAME"
elif [ -f "$NAME.run" ]; then
RESULT="$NAME.run"
fi
echo "$RESULT"
}
#
# Symlink file if it appears under any of the given paths.
#
create_links() {
dir_list="$1"
file_list="$2"
[ -n "$STF_PATH" ] || fail "STF_PATH wasn't correctly set"
for i in $file_list; do
for j in $dir_list; do
[ ! -e "$STF_PATH/$i" ] || continue
if [ ! -d "$j/$i" ] && [ -e "$j/$i" ]; then
ln -sf "$j/$i" "$STF_PATH/$i" || \
fail "Couldn't link $i"
break
fi
done
[ ! -e "$STF_PATH/$i" ] && \
STF_MISSING_BIN="$STF_MISSING_BIN $i"
done
STF_MISSING_BIN=${STF_MISSING_BIN# }
}
#
# Constrain the path to limit the available binaries to a known set.
# When running in-tree a top level ./bin/ directory is created for
# convenience, otherwise a temporary directory is used.
#
constrain_path() {
. "$STF_SUITE/include/commands.cfg"
# On FreeBSD, base system zfs utils are in /sbin and OpenZFS utils
# install to /usr/local/sbin. To avoid testing the wrong utils we
# need /usr/local to come before / in the path search order.
SYSTEM_DIRS="/usr/local/bin /usr/local/sbin"
SYSTEM_DIRS="$SYSTEM_DIRS /usr/bin /usr/sbin /bin /sbin $LIBEXEC_DIR"
if [ "$INTREE" = "yes" ]; then
# Constrained path set to ./zfs/bin/
STF_PATH="$BIN_DIR"
STF_PATH_REMOVE="no"
STF_MISSING_BIN=""
if [ ! -d "$STF_PATH" ]; then
mkdir "$STF_PATH"
chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
fi
# Special case links for standard zfs utilities
DIRS="$(find "$CMD_DIR" -type d \( ! -name .deps -a \
! -name .libs \) -print | tr '\n' ' ')"
create_links "$DIRS" "$ZFS_FILES"
# Special case links for zfs test suite utilities
DIRS="$(find "$STF_SUITE" -type d \( ! -name .deps -a \
! -name .libs \) -print | tr '\n' ' ')"
create_links "$DIRS" "$ZFSTEST_FILES"
else
# Constrained path set to /var/tmp/constrained_path.*
SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXXXX}
STF_PATH=$(mktemp -d "$SYSTEMDIR")
STF_PATH_REMOVE="yes"
STF_MISSING_BIN=""
chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH"
# Special case links for standard zfs utilities
create_links "$SYSTEM_DIRS" "$ZFS_FILES"
# Special case links for zfs test suite utilities
create_links "$STF_SUITE/bin" "$ZFSTEST_FILES"
fi
# Standard system utilities
SYSTEM_FILES="$SYSTEM_FILES_COMMON"
if [ "$UNAME" = "FreeBSD" ] ; then
SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_FREEBSD"
else
SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_LINUX"
fi
create_links "$SYSTEM_DIRS" "$SYSTEM_FILES"
# Exceptions
ln -fs "$STF_PATH/awk" "$STF_PATH/nawk"
if [ "$UNAME" = "Linux" ] ; then
ln -fs /sbin/fsck.ext4 "$STF_PATH/fsck"
ln -fs /sbin/mkfs.ext4 "$STF_PATH/newfs"
ln -fs "$STF_PATH/gzip" "$STF_PATH/compress"
ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress"
ln -fs "$STF_PATH/exportfs" "$STF_PATH/share"
ln -fs "$STF_PATH/exportfs" "$STF_PATH/unshare"
elif [ "$UNAME" = "FreeBSD" ] ; then
ln -fs /usr/local/bin/ksh93 "$STF_PATH/ksh"
fi
}
#
# Output a useful usage message.
#
usage() {
cat << EOF
USAGE:
$0 [-hvqxkfS] [-s SIZE] [-r RUNFILES] [-t PATH] [-u USER]
DESCRIPTION:
ZFS Test Suite launch script
OPTIONS:
-h Show this message
-v Verbose zfs-tests.sh output
-q Quiet test-runner output
-x Remove all testpools, dm, lo, and files (unsafe)
-k Disable cleanup after test failure
-f Use files only, disables block device tests
-S Enable stack tracer (negative performance impact)
-c Only create and populate constrained path
-R Automatically rerun failing tests
-n NFSFILE Use the nfsfile to determine the NFS configuration
-I NUM Number of iterations
-d DIR Use DIR for files and loopback devices
-s SIZE Use vdevs of SIZE (default: 4G)
-r RUNFILES Run tests in RUNFILES (default: ${DEFAULT_RUNFILES})
-t PATH Run single test at PATH relative to test suite
-T TAGS Comma separated list of tags (default: 'functional')
-u USER Run single test as USER (default: root)
EXAMPLES:
# Run the default (linux) suite of tests and output the configuration used.
$0 -v
# Run a smaller suite of tests designed to run more quickly.
$0 -r linux-fast
# Run a single test
$0 -t tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh
# Clean up a previous run of the test suite prior to testing, run the
# default (linux) suite of tests and perform no cleanup on exit.
$0 -x
EOF
}
while getopts 'hvqxkfScRn:d:s:r:?t:T:u:I:' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE="yes"
;;
q)
QUIET="yes"
;;
x)
CLEANUPALL="yes"
;;
k)
CLEANUP="no"
;;
f)
LOOPBACK="no"
;;
S)
STACK_TRACER="yes"
;;
c)
constrain_path
exit
;;
R)
RERUN="yes"
;;
n)
nfsfile=$OPTARG
[ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile"
export NFS=1
. "$nfsfile"
;;
d)
FILEDIR="$OPTARG"
;;
I)
ITERATIONS="$OPTARG"
if [ "$ITERATIONS" -le 0 ]; then
fail "Iterations must be greater than 0."
fi
;;
s)
FILESIZE="$OPTARG"
;;
r)
RUNFILES="$OPTARG"
;;
t)
if [ -n "$SINGLETEST" ]; then
fail "-t can only be provided once."
fi
SINGLETEST="$OPTARG"
;;
T)
TAGS="$OPTARG"
;;
u)
SINGLETESTUSER="$OPTARG"
;;
?)
usage
exit
;;
esac
done
shift $((OPTIND-1))
FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"}
LOOPBACKS=${LOOPBACKS:-""}
if [ -n "$SINGLETEST" ]; then
if [ -n "$TAGS" ]; then
fail "-t and -T are mutually exclusive."
fi
RUNFILE_DIR="/var/tmp"
RUNFILES="zfs-tests.$$.run"
SINGLEQUIET="False"
if [ -n "$QUIET" ]; then
SINGLEQUIET="True"
fi
cat >$RUNFILE_DIR/$RUNFILES << EOF
[DEFAULT]
pre =
quiet = $SINGLEQUIET
pre_user = root
user = $SINGLETESTUSER
timeout = 600
post_user = root
post =
outputdir = /var/tmp/test_results
EOF
SINGLETESTDIR=$(dirname "$SINGLETEST")
SINGLETESTFILE=$(basename "$SINGLETEST")
SETUPSCRIPT=
CLEANUPSCRIPT=
if [ -f "$STF_SUITE/$SINGLETESTDIR/setup.ksh" ]; then
SETUPSCRIPT="setup"
fi
if [ -f "$STF_SUITE/$SINGLETESTDIR/cleanup.ksh" ]; then
CLEANUPSCRIPT="cleanup"
fi
cat >>$RUNFILE_DIR/$RUNFILES << EOF
[$SINGLETESTDIR]
tests = ['$SINGLETESTFILE']
pre = $SETUPSCRIPT
post = $CLEANUPSCRIPT
tags = ['functional']
EOF
fi
#
# Use default tag if none was specified
#
TAGS=${TAGS:='functional'}
#
# Attempt to locate the runfiles describing the test workload.
#
R=""
IFS=,
for RUNFILE in $RUNFILES; do
if [ -n "$RUNFILE" ]; then
SAVED_RUNFILE="$RUNFILE"
RUNFILE=$(find_runfile "$RUNFILE")
[ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE"
R="$R,$RUNFILE"
fi
if [ ! -r "$RUNFILE" ]; then
fail "Cannot read runfile: $RUNFILE"
fi
done
unset IFS
RUNFILES=${R#,}
#
# This script should not be run as root. Instead the test user, which may
# be a normal user account, needs to be configured such that it can
# run commands via sudo passwordlessly.
#
if [ "$(id -u)" = "0" ]; then
fail "This script must not be run as root."
fi
if [ "$(sudo whoami)" != "root" ]; then
fail "Passwordless sudo access required."
fi
#
# Constrain the available binaries to a known set.
#
constrain_path
#
# Check if ksh exists
#
if [ "$UNAME" = "FreeBSD" ]; then
sudo ln -fs /usr/local/bin/ksh93 /bin/ksh
fi
[ -e "$STF_PATH/ksh" ] || fail "This test suite requires ksh."
[ -e "$STF_SUITE/include/default.cfg" ] || fail \
"Missing $STF_SUITE/include/default.cfg file."
#
# Verify the ZFS module stack is loaded.
#
if [ "$STACK_TRACER" = "yes" ]; then
sudo "${ZFS_SH}" -S >/dev/null 2>&1
else
sudo "${ZFS_SH}" >/dev/null 2>&1
fi
#
# Attempt to clean up all previous state for a new test run.
#
if [ "$CLEANUPALL" = "yes" ]; then
cleanup_all
fi
#
# By default preserve any existing pools
# NOTE: Since 'zpool list' outputs a newline-delimited list convert $KEEP from
# space-delimited to newline-delimited.
#
if [ -z "${KEEP}" ]; then
KEEP="$(sudo "$ZPOOL" list -H -o name)"
if [ -z "${KEEP}" ]; then
KEEP="rpool"
fi
else
KEEP="$(echo "$KEEP" | tr '[:blank:]' '\n')"
fi
#
# NOTE: The following environment variables are undocumented
# and should be used for testing purposes only:
#
# __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists
# __ZFS_POOL_RESTRICT - iterate only over the pools it lists
#
# See libzfs/libzfs_config.c for more information.
#
if [ "$UNAME" = "FreeBSD" ] ; then
__ZFS_POOL_EXCLUDE="$(echo "$KEEP" | tr -s '\n' ' ')"
else
__ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')"
fi
. "$STF_SUITE/include/default.cfg"
#
# No DISKS have been provided, so basic file or loopback based devices
# must be created for the test suite to use.
#
if [ -z "${DISKS}" ]; then
#
# If this is a performance run, prevent accidental use of
# loopback devices.
#
[ "$TAGS" = "perf" ] && fail "Running perf tests without disks."
#
# Create sparse files for the test suite. These may be used
# directly or have loopback devices layered on them.
#
for TEST_FILE in ${FILES}; do
[ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}"
truncate -s "${FILESIZE}" "${TEST_FILE}" ||
fail "Failed creating: ${TEST_FILE} ($?)"
done
#
# If requested, set up loopback devices backed by the sparse files.
#
if [ "$LOOPBACK" = "yes" ]; then
test -x "$LOSETUP" || fail "$LOSETUP utility must be installed"
for TEST_FILE in ${FILES}; do
if [ "$UNAME" = "FreeBSD" ] ; then
MDDEVICE=$(sudo "${LOSETUP}" -a -t vnode -f "${TEST_FILE}")
if [ -z "$MDDEVICE" ] ; then
fail "Failed: ${TEST_FILE} -> loopback"
fi
DISKS="$DISKS $MDDEVICE"
LOOPBACKS="$LOOPBACKS $MDDEVICE"
else
TEST_LOOPBACK=$(sudo "${LOSETUP}" -f)
sudo "${LOSETUP}" "${TEST_LOOPBACK}" "${TEST_FILE}" ||
fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}"
- BASELOOPBACK=$(basename "$TEST_LOOPBACK")
+ BASELOOPBACK="${TEST_LOOPBACK##*/}"
DISKS="$DISKS $BASELOOPBACK"
LOOPBACKS="$LOOPBACKS $TEST_LOOPBACK"
fi
done
DISKS=${DISKS# }
LOOPBACKS=${LOOPBACKS# }
else
DISKS="$FILES"
fi
fi
#
# It may be desirable to test with fewer disks than the default when running
# the performance tests, but the functional tests require at least three.
#
NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}')
if [ "$TAGS" != "perf" ]; then
[ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)"
fi
#
# Disable SELinux until the ZFS Test Suite has been updated accordingly.
#
if [ -x "$STF_PATH/setenforce" ]; then
sudo setenforce permissive >/dev/null 2>&1
fi
#
# Enable internal ZFS debug log and clear it.
#
if [ -e /sys/module/zfs/parameters/zfs_dbgmsg_enable ]; then
sudo /bin/sh -c "echo 1 >/sys/module/zfs/parameters/zfs_dbgmsg_enable"
sudo /bin/sh -c "echo 0 >/proc/spl/kstat/zfs/dbgmsg"
fi
msg
msg "--- Configuration ---"
msg "Runfiles: $RUNFILES"
msg "STF_TOOLS: $STF_TOOLS"
msg "STF_SUITE: $STF_SUITE"
msg "STF_PATH: $STF_PATH"
msg "FILEDIR: $FILEDIR"
msg "FILES: $FILES"
msg "LOOPBACKS: $LOOPBACKS"
msg "DISKS: $DISKS"
msg "NUM_DISKS: $NUM_DISKS"
msg "FILESIZE: $FILESIZE"
msg "ITERATIONS: $ITERATIONS"
msg "TAGS: $TAGS"
msg "STACK_TRACER: $STACK_TRACER"
msg "Keep pool(s): $KEEP"
msg "Missing util(s): $STF_MISSING_BIN"
msg ""
export STF_TOOLS
export STF_SUITE
export STF_PATH
export DISKS
export FILEDIR
export KEEP
export __ZFS_POOL_EXCLUDE
export TESTFAIL_CALLBACKS
export PATH=$STF_PATH
-if [ "$UNAME" = "FreeBSD" ] ; then
- mkdir -p "$FILEDIR" || true
- RESULTS_FILE=$(mktemp -u "${FILEDIR}/zts-results.XXXXXX")
- REPORT_FILE=$(mktemp -u "${FILEDIR}/zts-report.XXXXXX")
-else
- RESULTS_FILE=$(mktemp -u -t zts-results.XXXXXX -p "$FILEDIR")
- REPORT_FILE=$(mktemp -u -t zts-report.XXXXXX -p "$FILEDIR")
-fi
+mktemp_file() {
+ if [ "$UNAME" = "FreeBSD" ]; then
+ mktemp -u "${FILEDIR}/$1.XXXXXX"
+ else
+ mktemp -ut "$1.XXXXXX" -p "$FILEDIR"
+ fi
+}
+mkdir -p "$FILEDIR" || :
+RESULTS_FILE=$(mktemp_file zts-results)
+REPORT_FILE=$(mktemp_file zts-report)
#
# Run all the tests as specified.
#
msg "${TEST_RUNNER} ${QUIET:+-q}" \
"-c \"${RUNFILES}\"" \
"-T \"${TAGS}\"" \
"-i \"${STF_SUITE}\"" \
"-I \"${ITERATIONS}\""
${TEST_RUNNER} ${QUIET:+-q} \
-c "${RUNFILES}" \
-T "${TAGS}" \
-i "${STF_SUITE}" \
-I "${ITERATIONS}" \
2>&1 | tee "$RESULTS_FILE"
#
# Analyze the results.
#
${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE"
RESULT=$?
if [ "$RESULT" -eq "2" ] && [ -n "$RERUN" ]; then
MAYBES="$($ZTS_REPORT --list-maybes)"
- TEMP_RESULTS_FILE=$(mktemp -u -t zts-results-tmp.XXXXX -p "$FILEDIR")
- TEST_LIST=$(mktemp -u -t test-list.XXXXX -p "$FILEDIR")
+ TEMP_RESULTS_FILE=$(mktemp_file zts-results-tmp)
+ TEST_LIST=$(mktemp_file test-list)
grep "^Test:.*\[FAIL\]" "$RESULTS_FILE" >"$TEMP_RESULTS_FILE"
for test_name in $MAYBES; do
grep "$test_name " "$TEMP_RESULTS_FILE" >>"$TEST_LIST"
done
${TEST_RUNNER} ${QUIET:+-q} \
-c "${RUNFILES}" \
-T "${TAGS}" \
-i "${STF_SUITE}" \
-I "${ITERATIONS}" \
-l "${TEST_LIST}" \
2>&1 | tee "$RESULTS_FILE"
#
# Analyze the results.
#
${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE"
RESULT=$?
fi
cat "$REPORT_FILE"
RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
if [ -d "$RESULTS_DIR" ]; then
cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results"
fi
rm -f "$RESULTS_FILE" "$REPORT_FILE"
if [ -n "$SINGLETEST" ]; then
rm -f "$RUNFILES" >/dev/null 2>&1
fi
exit ${RESULT}
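Several hunks in this script and in zfs.sh below replace basename(1) calls with POSIX parameter expansion, which yields the same result without forking an external process. A minimal sketch of the two forms used; the paths are examples only:
    TEST_LOOPBACK=/dev/loop3                   # hypothetical loop device
    LOOP_DEV="${TEST_LOOPBACK##*/}"            # loop3, same as basename
    KMOD=/path/to/zfs.ko                       # hypothetical module path
    NAME="${KMOD##*/}"; NAME="${NAME%.ko}"     # zfs, same as basename "$KMOD" .ko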
diff --git a/sys/contrib/openzfs/scripts/zfs.sh b/sys/contrib/openzfs/scripts/zfs.sh
index 7870b8930cab..940c83ffa28f 100755
--- a/sys/contrib/openzfs/scripts/zfs.sh
+++ b/sys/contrib/openzfs/scripts/zfs.sh
@@ -1,288 +1,291 @@
#!/bin/sh
#
# A simple script to load/unload the ZFS module stack.
#
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zfs.sh
VERBOSE="no"
UNLOAD="no"
LOAD="yes"
STACK_TRACER="no"
ZED_PIDFILE=${ZED_PIDFILE:-/var/run/zed.pid}
LDMOD=${LDMOD:-/sbin/modprobe}
KMOD_ZLIB_DEFLATE=${KMOD_ZLIB_DEFLATE:-zlib_deflate}
KMOD_ZLIB_INFLATE=${KMOD_ZLIB_INFLATE:-zlib_inflate}
KMOD_SPL=${KMOD_SPL:-spl}
KMOD_ZAVL=${KMOD_ZAVL:-zavl}
KMOD_ZNVPAIR=${KMOD_ZNVPAIR:-znvpair}
KMOD_ZUNICODE=${KMOD_ZUNICODE:-zunicode}
KMOD_ZCOMMON=${KMOD_ZCOMMON:-zcommon}
KMOD_ZLUA=${KMOD_ZLUA:-zlua}
KMOD_ICP=${KMOD_ICP:-icp}
KMOD_ZFS=${KMOD_ZFS:-zfs}
KMOD_FREEBSD=${KMOD_FREEBSD:-openzfs}
KMOD_ZZSTD=${KMOD_ZZSTD:-zzstd}
usage() {
cat << EOF
USAGE:
$0 [hvudS] [module-options]
DESCRIPTION:
Load/unload the ZFS module stack.
OPTIONS:
-h Show this message
-v Verbose
-r Reload modules
-u Unload modules
-S Enable kernel stack tracer
EOF
}
while getopts 'hvruS' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE="yes"
;;
r)
UNLOAD="yes"
LOAD="yes"
;;
u)
UNLOAD="yes"
LOAD="no"
;;
S)
STACK_TRACER="yes"
;;
?)
usage
exit
;;
esac
done
kill_zed() {
if [ -f "$ZED_PIDFILE" ]; then
PID=$(cat "$ZED_PIDFILE")
kill "$PID"
fi
}
check_modules_linux() {
LOADED_MODULES=""
MISSING_MODULES=""
for KMOD in $KMOD_SPL $KMOD_ZAVL $KMOD_ZNVPAIR $KMOD_ZUNICODE $KMOD_ZCOMMON \
$KMOD_ZLUA $KMOD_ZZSTD $KMOD_ICP $KMOD_ZFS; do
- NAME=$(basename "$KMOD" .ko)
+ NAME="${KMOD##*/}"
+ NAME="${NAME%.ko}"
if lsmod | grep -E -q "^${NAME}"; then
LOADED_MODULES="$LOADED_MODULES\t$NAME\n"
fi
if ! modinfo "$KMOD" >/dev/null 2>&1; then
MISSING_MODULES="$MISSING_MODULES\t${KMOD}\n"
fi
done
if [ -n "$LOADED_MODULES" ]; then
printf "Unload the kernel modules by running '%s -u':\n" "$PROG"
printf "%b" "$LOADED_MODULES"
exit 1
fi
if [ -n "$MISSING_MODULES" ]; then
printf "The following kernel modules can not be found:\n"
printf "%b" "$MISSING_MODULES"
exit 1
fi
return 0
}
load_module_linux() {
KMOD=$1
FILE=$(modinfo "$KMOD" | awk '/^filename:/ {print $2}')
VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}')
if [ "$VERBOSE" = "yes" ]; then
echo "Loading: $FILE ($VERSION)"
fi
if ! $LDMOD "$KMOD" >/dev/null 2>&1; then
echo "Failed to load $KMOD"
return 1
fi
return 0
}
load_modules_freebsd() {
kldload "$KMOD_FREEBSD" || return 1
if [ "$VERBOSE" = "yes" ]; then
echo "Successfully loaded ZFS module stack"
fi
return 0
}
load_modules_linux() {
mkdir -p /etc/zfs
if modinfo "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1; then
modprobe "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1
fi
if modinfo "$KMOD_ZLIB_INFLATE">/dev/null 2>&1; then
modprobe "$KMOD_ZLIB_INFLATE" >/dev/null 2>&1
fi
for KMOD in $KMOD_SPL $KMOD_ZAVL $KMOD_ZNVPAIR \
$KMOD_ZUNICODE $KMOD_ZCOMMON $KMOD_ZLUA $KMOD_ZZSTD \
$KMOD_ICP $KMOD_ZFS; do
load_module_linux "$KMOD" || return 1
done
if [ "$VERBOSE" = "yes" ]; then
echo "Successfully loaded ZFS module stack"
fi
return 0
}
unload_module_linux() {
KMOD=$1
- NAME=$(basename "$KMOD" .ko)
+ NAME="${KMOD##*/}"
+ NAME="${NAME%.ko}"
FILE=$(modinfo "$KMOD" | awk '/^filename:/ {print $2}')
VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}')
if [ "$VERBOSE" = "yes" ]; then
echo "Unloading: $KMOD ($VERSION)"
fi
rmmod "$NAME" || echo "Failed to unload $NAME"
return 0
}
unload_modules_freebsd() {
kldunload "$KMOD_FREEBSD" || echo "Failed to unload $KMOD_FREEBSD"
if [ "$VERBOSE" = "yes" ]; then
echo "Successfully unloaded ZFS module stack"
fi
return 0
}
unload_modules_linux() {
for KMOD in $KMOD_ZFS $KMOD_ICP $KMOD_ZZSTD $KMOD_ZLUA $KMOD_ZCOMMON \
$KMOD_ZUNICODE $KMOD_ZNVPAIR $KMOD_ZAVL $KMOD_SPL; do
- NAME=$(basename "$KMOD" .ko)
- USE_COUNT=$(lsmod | grep -E "^${NAME} " | awk '{print $3}')
+ NAME="${KMOD##*/}"
+ NAME="${NAME%.ko}"
+ USE_COUNT=$(lsmod | awk '/^'"${NAME}"'/ {print $3}')
if [ "$USE_COUNT" = "0" ] ; then
unload_module_linux "$KMOD" || return 1
elif [ "$USE_COUNT" != "" ] ; then
echo "Module ${NAME} is still in use!"
return 1
fi
done
if modinfo "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1; then
modprobe -r "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1
fi
if modinfo "$KMOD_ZLIB_INFLATE">/dev/null 2>&1; then
modprobe -r "$KMOD_ZLIB_INFLATE" >/dev/null 2>&1
fi
if [ "$VERBOSE" = "yes" ]; then
echo "Successfully unloaded ZFS module stack"
fi
return 0
}
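# Note: lsmod prints "Module  Size  Used by" rows, so the awk filter above
# reads column 3 as the reference count. A module is only unloaded when that
# count is 0; a non-empty, non-zero count means it is still in use and the
# unload is aborted.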
stack_clear_linux() {
STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled
if [ "$STACK_TRACER" = "yes" ] && [ -e "$STACK_MAX_SIZE" ]; then
echo 1 >"$STACK_TRACER_ENABLED"
echo 0 >"$STACK_MAX_SIZE"
fi
}
stack_check_linux() {
STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size
STACK_TRACE=/sys/kernel/debug/tracing/stack_trace
STACK_LIMIT=15362
if [ -e "$STACK_MAX_SIZE" ]; then
STACK_SIZE=$(cat "$STACK_MAX_SIZE")
if [ "$STACK_SIZE" -ge "$STACK_LIMIT" ]; then
echo
echo "Warning: max stack size $STACK_SIZE bytes"
cat "$STACK_TRACE"
fi
fi
}
if [ "$(id -u)" != 0 ]; then
echo "Must run as root"
exit 1
fi
UNAME=$(uname -s)
if [ "$UNLOAD" = "yes" ]; then
kill_zed
umount -t zfs -a
case $UNAME in
FreeBSD)
unload_modules_freebsd
;;
Linux)
stack_check_linux
unload_modules_linux
;;
esac
fi
if [ "$LOAD" = "yes" ]; then
case $UNAME in
FreeBSD)
load_modules_freebsd
;;
Linux)
stack_clear_linux
check_modules_linux
load_modules_linux "$@"
udevadm trigger
udevadm settle
;;
esac
fi
exit 0
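Typical invocations, for reference (root is required per the id check above; the path is relative to wherever the script lives, e.g. ./scripts/zfs.sh from a source tree, which is how zimport.sh below calls it):

    sudo ./scripts/zfs.sh         # load the ZFS module stack
    sudo ./scripts/zfs.sh -r      # reload: unload, then load again
    sudo ./scripts/zfs.sh -v -u   # verbosely unload the stack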
diff --git a/sys/contrib/openzfs/scripts/zimport.sh b/sys/contrib/openzfs/scripts/zimport.sh
index 0e9c01182b8b..03c766cf36c2 100755
--- a/sys/contrib/openzfs/scripts/zimport.sh
+++ b/sys/contrib/openzfs/scripts/zimport.sh
@@ -1,512 +1,512 @@
#!/usr/bin/env bash
#
# Verify that an assortment of known good reference pools can be imported
# using different versions of OpenZFS code.
#
# By default, reference pools for the major ZFS implementations will be
# checked against the most recent OpenZFS tags and the master development branch.
# Alternate tags or branches may be verified with the '-s <src-tag>' option.
# Passing the keyword "installed" will instruct the script to test whatever
# version is installed.
#
# Preferentially a reference pool is used for all tests. However, if one
# does not exist and the pool-tag matches one of the src-tags then a new
# reference pool will be created using binaries from that source build.
# This is particularly useful when you need to test your changes before
# opening a pull request. The keyword 'all' can be used as shorthand to
# refer to all available reference pools.
#
# New reference pools may be added by placing a bzip2 compressed tarball
# of the pool in the scripts/zfs-images directory and then passing
# the -p <pool-tag> option. To increase the test coverage reference pools
# should be collected for all the major ZFS implementations. Having these
# pools easily available is also helpful to the developers.
#
# Care should be taken to run these tests with a kernel supported by all
# the listed tags. Otherwise build failures will cause false positives.
#
#
# EXAMPLES:
#
# The following example will verify the zfs-0.6.2 tag, the master branch,
# and the installed zfs version can correctly import the listed pools.
# Note that there is no reference pool available for master or installed, but
# because binaries are available one is automatically constructed. The
# working directory is also preserved between runs (-k), avoiding the
# need to rebuild from source for multiple runs.
#
# zimport.sh -k -f /var/tmp/zimport \
# -s "zfs-0.6.2 master installed" \
# -p "zevo-1.1.1 zol-0.6.2 zol-0.6.2-173 master installed"
#
# ------------------------ OpenZFS Source Versions ----------------
# zfs-0.6.2 master 0.6.2-175_g36eb554
# -----------------------------------------------------------------
# Clone ZFS Local Local Skip
# Build ZFS Pass Pass Skip
# -----------------------------------------------------------------
# zevo-1.1.1 Pass Pass Pass
# zol-0.6.2 Pass Pass Pass
# zol-0.6.2-173 Fail Pass Pass
# master Pass Pass Pass
# installed Pass Pass Pass
#
BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
. "${BASE_DIR}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zimport.sh
SRC_TAGS="zfs-0.6.5.11 master"
POOL_TAGS="all master"
POOL_CREATE_OPTIONS=
TEST_DIR=$(mktemp -u -d -p /var/tmp zimport.XXXXXXXX)
KEEP="no"
VERBOSE="no"
COLOR="yes"
REPO="https://github.com/openzfs"
IMAGES_DIR="$SCRIPTDIR/zfs-images/"
IMAGES_TAR="https://github.com/openzfs/zfs-images/tarball/master"
ERROR=0
CONFIG_LOG="configure.log"
CONFIG_OPTIONS=${CONFIG_OPTIONS:-""}
MAKE_LOG="make.log"
MAKE_OPTIONS=${MAKE_OPTIONS:-"-s -j$(nproc)"}
COLOR_GREEN="\033[0;32m"
COLOR_RED="\033[0;31m"
COLOR_BROWN="\033[0;33m"
COLOR_RESET="\033[0m"
usage() {
cat << EOF
USAGE:
zimport.sh [hvck] [-r repo] [-s src-tag] [-i pool-dir] [-p pool-tag]
[-f path] [-o options]
DESCRIPTION:
ZPOOL import verification tests
OPTIONS:
-h Show this message
-v Verbose
-c No color
-k Keep temporary directory
-r <repo> Source repository ($REPO)
-s <src-tag>... Verify OpenZFS versions with the listed tags
-i <pool-dir> Pool image directory
-p <pool-tag>... Verify pools created with the listed tags
-f <path> Temporary directory to use
-o <options> Additional options to pass to 'zpool create'
EOF
}
while getopts 'hvckr:s:i:p:f:o:?' OPTION; do
case $OPTION in
h)
usage
exit 1
;;
v)
VERBOSE="yes"
;;
c)
COLOR="no"
;;
k)
KEEP="yes"
;;
r)
REPO="$OPTARG"
;;
s)
SRC_TAGS="$OPTARG"
;;
i)
IMAGES_DIR="$OPTARG"
;;
p)
POOL_TAGS="$OPTARG"
;;
f)
TEST_DIR="$OPTARG"
;;
o)
POOL_CREATE_OPTIONS="$OPTARG"
;;
?)
usage
exit 1
;;
esac
done
#
# Verify the ZFS module stack is not loaded
#
if lsmod | grep zfs >/dev/null; then
echo "ZFS modules must be unloaded"
exit 1
fi
#
# Create a random directory tree of files and sub-directories to act
# as a copy source for the various regression tests.
#
populate() {
local ROOT=$1
local MAX_DIR_SIZE=$2
local MAX_FILE_SIZE=$3
mkdir -p "$ROOT"/{a,b,c,d,e,f,g}/{h,i}
DIRS=$(find "$ROOT")
for DIR in $DIRS; do
COUNT=$((RANDOM % MAX_DIR_SIZE))
for _ in $(seq $COUNT); do
FILE=$(mktemp -p "$DIR")
SIZE=$((RANDOM % MAX_FILE_SIZE))
dd if=/dev/urandom of="$FILE" bs=1k \
count="$SIZE" &>/dev/null
done
done
return 0
}
SRC_DIR=$(mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX)
trap 'rm -Rf "$SRC_DIR"' INT TERM EXIT
populate "$SRC_DIR" 10 100
SRC_DIR="$TEST_DIR/src"
SRC_DIR_ZFS="$SRC_DIR/zfs"
if [ "$COLOR" = "no" ]; then
COLOR_GREEN=""
COLOR_BROWN=""
COLOR_RED=""
COLOR_RESET=""
fi
pass_nonewline() {
echo -n -e "${COLOR_GREEN}Pass${COLOR_RESET}\t\t"
}
skip_nonewline() {
echo -n -e "${COLOR_BROWN}Skip${COLOR_RESET}\t\t"
}
fail_nonewline() {
echo -n -e "${COLOR_RED}Fail${COLOR_RESET}\t\t"
}
#
# Log a failure message, cleanup, and return an error.
#
fail() {
echo -e "$PROG: $1" >&2
$ZFS_SH -u >/dev/null 2>&1
exit 1
}
#
# Set several helper variables which are derived from a source tag.
#
# ZFS_TAG - The passed zfs-x.y.z tag
# ZFS_DIR - The zfs directory name
# ZFS_URL - The zfs github URL to fetch the tarball
#
src_set_vars() {
local TAG=$1
ZFS_TAG="$TAG"
ZFS_DIR="$SRC_DIR_ZFS/$ZFS_TAG"
ZFS_URL="$REPO/zfs/tarball/$ZFS_TAG"
if [ "$TAG" = "installed" ]; then
ZPOOL_CMD=$(command -v zpool)
ZFS_CMD=$(command -v zfs)
ZFS_SH="/usr/share/zfs/zfs.sh"
else
ZPOOL_CMD="./cmd/zpool/zpool"
ZFS_CMD="./cmd/zfs/zfs"
ZFS_SH="./scripts/zfs.sh"
fi
}
#
# Set several helper variables which are derived from a pool name such
# as zol-0.6.x, zevo-1.1.1, etc. These refer to example pools from various
# ZFS implementations which are used to verify compatibility.
#
# POOL_TAG - The example pools name in scripts/zfs-images/.
# POOL_BZIP - The full path to the example bzip2 compressed pool.
# POOL_DIR - The top level test path for this pool.
# POOL_DIR_PRISTINE - The directory containing a pristine version of the pool.
# POOL_DIR_COPY - The directory containing a working copy of the pool.
# POOL_DIR_SRC - Location of a source build if it exists for this pool.
#
pool_set_vars() {
local TAG=$1
POOL_TAG=$TAG
POOL_BZIP=$IMAGES_DIR/$POOL_TAG.tar.bz2
POOL_DIR=$TEST_DIR/pools/$POOL_TAG
POOL_DIR_PRISTINE=$POOL_DIR/pristine
POOL_DIR_COPY=$POOL_DIR/copy
POOL_DIR_SRC="$SRC_DIR_ZFS/${POOL_TAG//zol/zfs}"
}
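# For example, with the defaults above a pool-tag of zol-0.6.2 (one of the
# reference pools listed in the header) resolves to
# POOL_BZIP=$IMAGES_DIR/zol-0.6.2.tar.bz2 and
# POOL_DIR=$TEST_DIR/pools/zol-0.6.2, while the zol->zfs substitution sets
# POOL_DIR_SRC=$SRC_DIR_ZFS/zfs-0.6.2 so it matches the source build layout.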
#
# Construct a non-trivial pool given a specific version of the source. More
# interesting pools provide better test coverage so this function should be
# extended as needed to create more realistic pools.
#
pool_create() {
pool_set_vars "$1"
src_set_vars "$1"
if [ "$POOL_TAG" != "installed" ]; then
cd "$POOL_DIR_SRC" || fail "Failed 'cd $POOL_DIR_SRC'"
fi
$ZFS_SH zfs="spa_config_path=$POOL_DIR_PRISTINE" || \
fail "Failed to load kmods"
# Create a file vdev RAIDZ pool.
truncate -s 1G \
"$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \
"$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4" || \
fail "Failed 'truncate -s 1G ...'"
# shellcheck disable=SC2086
$ZPOOL_CMD create $POOL_CREATE_OPTIONS "$POOL_TAG" raidz \
"$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \
"$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4" || \
fail "Failed '$ZPOOL_CMD create $POOL_CREATE_OPTIONS $POOL_TAG ...'"
# Create a pool/fs filesystem with some random contents.
$ZFS_CMD create "$POOL_TAG/fs" || \
fail "Failed '$ZFS_CMD create $POOL_TAG/fs'"
populate "/$POOL_TAG/fs/" 10 100
# Snapshot that filesystem, clone it, remove the files/dirs,
# replace them with new files/dirs.
$ZFS_CMD snap "$POOL_TAG/fs@snap" || \
fail "Failed '$ZFS_CMD snap $POOL_TAG/fs@snap'"
$ZFS_CMD clone "$POOL_TAG/fs@snap" "$POOL_TAG/clone" || \
fail "Failed '$ZFS_CMD clone $POOL_TAG/fs@snap $POOL_TAG/clone'"
# shellcheck disable=SC2086
rm -Rf /$POOL_TAG/clone/*
populate "/$POOL_TAG/clone/" 10 100
# Scrub the pool, delay slightly, then export it. It is now
# somewhat interesting for testing purposes.
$ZPOOL_CMD scrub "$POOL_TAG" || \
fail "Failed '$ZPOOL_CMD scrub $POOL_TAG'"
sleep 10
$ZPOOL_CMD export "$POOL_TAG" || \
fail "Failed '$ZPOOL_CMD export $POOL_TAG'"
$ZFS_SH -u || fail "Failed to unload kmods"
}
# If the zfs-images directory doesn't exist, fetch a copy from GitHub, then
# cache it in the $TEST_DIR and update $IMAGES_DIR.
if [ ! -d "$IMAGES_DIR" ]; then
IMAGES_DIR="$TEST_DIR/zfs-images"
mkdir -p "$IMAGES_DIR"
curl -sL "$IMAGES_TAR" | \
tar -xz -C "$IMAGES_DIR" --strip-components=1 || \
fail "Failed to download pool images"
fi
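# As described in the header, a new reference pool can be added by dropping
# a bzip2 tarball into this directory and selecting it with -p. A sketch for
# a hypothetical 'mypool-1.0' image (the tarball should carry a single
# top-level directory, since it is extracted with --strip-components=1;
# mirror an existing image if in doubt):
#   tar -cjf "$IMAGES_DIR/mypool-1.0.tar.bz2" -C /var/tmp mypool-1.0
#   ./zimport.sh -p mypool-1.0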
# Given the available images in the zfs-images directory substitute the
# list of available images for the reserved keyword 'all'.
for TAG in $POOL_TAGS; do
if [ "$TAG" = "all" ]; then
ALL_TAGS=$(echo "$IMAGES_DIR"/*.tar.bz2 | \
sed "s|$IMAGES_DIR/||g;s|.tar.bz2||g")
NEW_TAGS="$NEW_TAGS $ALL_TAGS"
else
NEW_TAGS="$NEW_TAGS $TAG"
fi
done
POOL_TAGS="$NEW_TAGS"
if [ "$VERBOSE" = "yes" ]; then
echo "---------------------------- Options ----------------------------"
echo "VERBOSE=$VERBOSE"
echo "KEEP=$KEEP"
echo "REPO=$REPO"
echo "SRC_TAGS=$SRC_TAGS"
echo "POOL_TAGS=$POOL_TAGS"
echo "PATH=$TEST_DIR"
echo "POOL_CREATE_OPTIONS=$POOL_CREATE_OPTIONS"
echo
fi
if [ ! -d "$TEST_DIR" ]; then
mkdir -p "$TEST_DIR"
fi
if [ ! -d "$SRC_DIR" ]; then
mkdir -p "$SRC_DIR"
fi
# Print a header for all tags which are being tested.
echo "------------------------ OpenZFS Source Versions ----------------"
printf "%-16s" " "
for TAG in $SRC_TAGS; do
src_set_vars "$TAG"
if [ "$TAG" = "installed" ]; then
ZFS_VERSION=$(modinfo zfs | awk '/version:/ { print $2; exit }')
if [ -n "$ZFS_VERSION" ]; then
printf "%-16s" "$ZFS_VERSION"
else
fail "ZFS is not installed"
fi
else
printf "%-16s" "$TAG"
fi
done
echo -e "\n-----------------------------------------------------------------"
#
# Attempt to generate the tarball from your local git repository; if that
# fails, then attempt to download the tarball from GitHub.
#
printf "%-16s" "Clone ZFS"
for TAG in $SRC_TAGS; do
src_set_vars "$TAG"
if [ -d "$ZFS_DIR" ]; then
skip_nonewline
elif [ "$ZFS_TAG" = "installed" ]; then
skip_nonewline
else
cd "$SRC_DIR" || fail "Failed 'cd $SRC_DIR'"
if [ ! -d "$SRC_DIR_ZFS" ]; then
mkdir -p "$SRC_DIR_ZFS"
fi
git archive --format=tar --prefix="$ZFS_TAG/" "$ZFS_TAG" \
-o "$SRC_DIR_ZFS/$ZFS_TAG.tar" &>/dev/null || \
rm "$SRC_DIR_ZFS/$ZFS_TAG.tar"
if [ -s "$SRC_DIR_ZFS/$ZFS_TAG.tar" ]; then
tar -xf "$SRC_DIR_ZFS/$ZFS_TAG.tar" -C "$SRC_DIR_ZFS"
rm "$SRC_DIR_ZFS/$ZFS_TAG.tar"
echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
else
mkdir -p "$ZFS_DIR" || fail "Failed to create $ZFS_DIR"
curl -sL "$ZFS_URL" | tar -xz -C "$ZFS_DIR" \
--strip-components=1 || \
fail "Failed to download $ZFS_URL"
echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
fi
fi
done
printf "\n"
# Build the listed tags
printf "%-16s" "Build ZFS"
for TAG in $SRC_TAGS; do
src_set_vars "$TAG"
if [ -f "$ZFS_DIR/module/zfs/zfs.ko" ]; then
skip_nonewline
elif [ "$ZFS_TAG" = "installed" ]; then
skip_nonewline
else
cd "$ZFS_DIR" || fail "Failed 'cd $ZFS_DIR'"
make distclean &>/dev/null
./autogen.sh >>"$CONFIG_LOG" 2>&1 || \
fail "Failed ZFS 'autogen.sh'"
# shellcheck disable=SC2086
./configure $CONFIG_OPTIONS >>"$CONFIG_LOG" 2>&1 || \
fail "Failed ZFS 'configure $CONFIG_OPTIONS'"
# shellcheck disable=SC2086
make $MAKE_OPTIONS >>"$MAKE_LOG" 2>&1 || \
fail "Failed ZFS 'make $MAKE_OPTIONS'"
pass_nonewline
fi
done
printf "\n"
echo "-----------------------------------------------------------------"
# Either create a new pool using 'zpool create', or alternatively restore an
# existing pool from another ZFS implementation for compatibility testing.
for TAG in $POOL_TAGS; do
pool_set_vars "$TAG"
SKIP=0
printf "%-16s" "$POOL_TAG"
rm -Rf "$POOL_DIR"
mkdir -p "$POOL_DIR_PRISTINE"
# Use the existing compressed image if available.
if [ -f "$POOL_BZIP" ]; then
tar -xjf "$POOL_BZIP" -C "$POOL_DIR_PRISTINE" \
--strip-components=1 || \
fail "Failed 'tar -xjf $POOL_BZIP"
# Use the installed version to create the pool.
elif [ "$TAG" = "installed" ]; then
pool_create "$TAG"
# A source build is available to create the pool.
elif [ -d "$POOL_DIR_SRC" ]; then
pool_create "$TAG"
else
SKIP=1
fi
# Verify 'zpool import' works for all listed source versions.
for SRC_TAG in $SRC_TAGS; do
if [ $SKIP -eq 1 ]; then
skip_nonewline
continue
fi
src_set_vars "$SRC_TAG"
if [ "$SRC_TAG" != "installed" ]; then
cd "$ZFS_DIR" || fail "Failed 'cd $ZFS_DIR'"
fi
$ZFS_SH zfs="spa_config_path=$POOL_DIR_COPY"
cp -a --sparse=always "$POOL_DIR_PRISTINE" \
"$POOL_DIR_COPY" || \
fail "Failed to copy $POOL_DIR_PRISTINE to $POOL_DIR_COPY"
POOL_NAME=$($ZPOOL_CMD import -d "$POOL_DIR_COPY" | \
- awk '/pool:/ { print $2; exit 0 }')
+ awk '/pool:/ { print $2; exit }')
if ! $ZPOOL_CMD import -N -d "$POOL_DIR_COPY" \
"$POOL_NAME" &>/dev/null; then
fail_nonewline
ERROR=1
else
$ZPOOL_CMD export "$POOL_NAME" || \
fail "Failed to export pool"
pass_nonewline
fi
rm -Rf "$POOL_DIR_COPY"
$ZFS_SH -u || fail "Failed to unload kmods"
done
printf "\n"
done
if [ "$KEEP" = "no" ]; then
rm -Rf "$TEST_DIR"
fi
exit $ERROR
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index 97ee7b8ae241..aefcd98436d7 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -1,946 +1,948 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains all of the common functional tests. When
# adding a new test consider also adding it to the sanity.run file
# if the new test runs to completion in only a few seconds.
#
# Approximate run time: 4-5 hours
#
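# For illustration only (hypothetical group and test names), a minimal
# entry takes the same shape as the sections below: a directory header,
# its test list, and tags, with per-group overrides such as pre, post, or
# timeout added as needed.
#
# [tests/functional/example_group]
# tests = ['example_test_001_pos']
# tags = ['functional', 'example_group']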
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_001_pos', 'alloc_class_002_neg', 'alloc_class_003_pos',
'alloc_class_004_pos', 'alloc_class_005_pos', 'alloc_class_006_pos',
'alloc_class_007_pos', 'alloc_class_008_pos', 'alloc_class_009_pos',
'alloc_class_010_pos', 'alloc_class_011_neg', 'alloc_class_012_pos',
'alloc_class_013_pos']
tags = ['functional', 'alloc_class']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'dbufstats_003_pos',
'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'root_atime_off', 'root_atime_on']
tags = ['functional', 'atime']
[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
tags = ['functional', 'bootfs']
[tests/functional/btree]
tests = ['btree_positive', 'btree_negative']
tags = ['functional', 'btree']
pre =
post =
[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
'cache_009_pos', 'cache_010_pos', 'cache_011_pos', 'cache_012_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'mixed_create_failure',
'sensitive_none_lookup', 'sensitive_none_delete',
'sensitive_formd_lookup', 'sensitive_formd_delete',
'insensitive_none_lookup', 'insensitive_none_delete',
'insensitive_formd_lookup', 'insensitive_formd_delete',
'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy', 'tst.snapshot_neg',
'tst.snapshot_recursive', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy',
'tst.terminate_by_signal'
]
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/checksum]
tests = ['run_sha2_test', 'run_skein_test', 'filetest_001_pos',
'filetest_002_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos',
'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos',
'zdb_006_pos', 'zdb_args_neg', 'zdb_args_pos',
'zdb_block_size_histogram', 'zdb_checksum', 'zdb_decompress',
'zdb_display_block', 'zdb_object_range_neg', 'zdb_object_range_pos',
'zdb_objset_id', 'zdb_decompress_zstd', 'zdb_recover', 'zdb_recover_2']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos', 'zfs_clone_encrypted', 'zfs_clone_deeply_nested']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_and_disable',
'zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos', 'zfs_destroy_clone_livelist',
'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types', 'zfs_diff_encrypted']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
- 'zfs_load-key_location', 'zfs_load-key_noop', 'zfs_load-key_recursive']
+ 'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
+ 'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
'zfs_mount_remount', 'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'receive-o-x_props_override',
'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
- 'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props']
+ 'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props',
+ 'zfs_receive_-wR-encrypted-mix']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_006_pos',
'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_009_neg',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_014_neg', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted', 'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw',
'zfs_send_sparse', 'zfs_send-b', 'zfs_send_skip_missing']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation',
'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_008_neg',
'zfs_share_010_neg', 'zfs_share_011_pos', 'zfs_share_concurrent_shares']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
'zfs_unmount_all_001_pos', 'zfs_unmount_nested', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos',
'zfs_unshare_007_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg', 'zpool_add_010_pos',
'add-o_ashift', 'add_prop_ashift', 'zpool_add_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_023_neg', 'zpool_create_024_pos',
'zpool_create_encrypted', 'zpool_create_crypt_combos',
'zpool_create_draid_001_pos', 'zpool_create_draid_002_pos',
'zpool_create_draid_003_pos', 'zpool_create_draid_004_pos',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos', 'zpool_create_features_006_pos',
'zpool_create_features_007_pos', 'zpool_create_features_008_pos',
'zpool_create_features_009_pos', 'create-o_ashift',
'zpool_create_tempname', 'zpool_create_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
'zpool_events_poolname', 'zpool_events_errors', 'zpool_events_duplicates',
'zpool_events_clear_retained']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
'zpool_import_015_pos', 'zpool_import_016_pos', 'zpool_import_017_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos', 'zpool_import_missing_001_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3', 'zpool_import_errata4',
'import_cachefile_device_added',
'import_cachefile_device_removed',
'import_cachefile_device_replaced',
'import_cachefile_mirror_attached',
'import_cachefile_mirror_detached',
'import_cachefile_paths_changed',
'import_cachefile_shared_device',
'import_devices_missing',
'import_paths_changed',
'import_rewind_config_changed',
'import_rewind_device_replaced']
tags = ['functional', 'cli_root', 'zpool_import']
timeout = 1200
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_attach_detach_add_remove',
'zpool_initialize_fault_export_import_online',
'zpool_initialize_import_export',
'zpool_initialize_offline_export_import_online',
'zpool_initialize_online_offline',
'zpool_initialize_split',
'zpool_initialize_start_and_cancel_neg',
'zpool_initialize_start_and_cancel_pos',
'zpool_initialize_suspend_resume',
'zpool_initialize_unsupported_vdevs',
'zpool_initialize_verify_checksums',
'zpool_initialize_verify_initialized']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg',
'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args', 'zpool_resilver_restart']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs',
'zpool_split_resilver', 'zpool_split_indirect',
'zpool_split_dryrun_output']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos',
'zpool_status_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove',
'zpool_trim_fault_export_import_online',
'zpool_trim_import_export', 'zpool_trim_multiple', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_partial', 'zpool_trim_rate', 'zpool_trim_rate_neg',
'zpool_trim_secure', 'zpool_trim_split', 'zpool_trim_start_and_cancel_neg',
'zpool_trim_start_and_cancel_pos', 'zpool_trim_suspend_resume',
'zpool_trim_unsupported_vdevs', 'zpool_trim_verify_checksums',
'zpool_trim_verify_trimmed']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg', 'zpool_upgrade_features_001_pos']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
'zpool_wait_initialize_flag', 'zpool_wait_multiple',
'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
'zpool_wait_trim_basic', 'zpool_wait_trim_cancel', 'zpool_wait_trim_flag',
'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_rebuild',
'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/cli_user/zpool_status]
tests = ['zpool_status_003_pos', 'zpool_status_-c_disable',
'zpool_status_-c_homedir', 'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_status']
[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
'l2arc_compressed_arc', 'l2arc_compressed_arc_disabled',
'l2arc_encrypted', 'l2arc_encrypted_no_compressed_arc']
tags = ['functional', 'compression']
[tests/functional/cp_files]
tests = ['cp_files_001_pos']
tags = ['functional', 'cp_files']
[tests/functional/crtime]
tests = ['crtime_001_pos' ]
tags = ['functional', 'crtime']
[tests/functional/ctime]
tests = ['ctime_001_pos' ]
tags = ['functional', 'ctime']
[tests/functional/deadman]
tests = ['deadman_ratelimit', 'deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_003_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
+[tests/functional/fallocate]
+tests = ['fallocate_punch-hole']
+tags = ['functional', 'fallocate']
+
[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']
[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
'history_004_pos', 'history_005_neg', 'history_006_neg',
'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
tests = ['run_hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']
[tests/functional/io]
tests = ['sync', 'psync', 'posixaio', 'mmap']
tags = ['functional', 'io']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos', 'inuse_008_pos', 'inuse_009_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
-[tests/functional/largest_pool]
-tests = ['largest_pool_001_pos']
-pre =
-post =
-tags = ['functional', 'largest_pool']
-
[tests/functional/limits]
tests = ['filesystem_count', 'filesystem_limit', 'snapshot_count',
'snapshot_limit']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_001', 'link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_write_001_pos', 'mmap_read_001_pos', 'mmap_seek_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos', 'random_creation']
tags = ['functional', 'mv_files']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos',
- 'enospc_df']
+ 'enospc_df', 'enospc_rm']
tags = ['functional', 'no_space']
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
'online_offline_003_neg']
tags = ['functional', 'online_offline']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
'checkpoint_capacity', 'checkpoint_conf_change', 'checkpoint_discard',
'checkpoint_discard_busy', 'checkpoint_discard_many',
'checkpoint_indirect', 'checkpoint_invalid', 'checkpoint_lun_expsz',
'checkpoint_open', 'checkpoint_removal', 'checkpoint_rewind',
'checkpoint_ro_rewind', 'checkpoint_sm_scale', 'checkpoint_twice',
'checkpoint_vdev_add', 'checkpoint_zdb', 'checkpoint_zhack_feat']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/pyzfs]
tests = ['pyzfs_unittest']
pre =
post =
tags = ['functional', 'pyzfs']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_embedded', 'redacted_holes',
'redacted_incrementals', 'redacted_largeblocks', 'redacted_many_clones',
'redacted_mixed_recsize', 'redacted_mounts', 'redacted_negative',
'redacted_origin', 'redacted_panic', 'redacted_props', 'redacted_resume',
'redacted_size', 'redacted_volume']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos', 'raidz_003_pos', 'raidz_004_pos']
tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_draid', 'redundancy_draid1', 'redundancy_draid2',
'redundancy_draid3', 'redundancy_draid_damaged', 'redundancy_draid_spare1',
'redundancy_draid_spare2', 'redundancy_draid_spare3', 'redundancy_mirror',
'redundancy_raidz', 'redundancy_raidz1', 'redundancy_raidz2',
'redundancy_raidz3', 'redundancy_stripe']
tags = ['functional', 'redundancy']
timeout = 1200
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg', 'refquota_008_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_004_pos', 'refreserv_005_pos', 'refreserv_multi_raidz',
'refreserv_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_cancel', 'removal_check_space',
'removal_condense_export', 'removal_multiple_indirection',
'removal_nopwrite', 'removal_remap_deadlists',
'removal_resume_export', 'removal_sanity', 'removal_with_add',
'removal_with_create_fs', 'removal_with_dedup',
'removal_with_errors', 'removal_with_export',
'removal_with_ganging', 'removal_with_faulted',
'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
'removal_with_send_recv', 'removal_with_snapshot',
'removal_with_write', 'removal_with_zdb', 'remove_expanded',
'remove_mirror', 'remove_mirror_sanity', 'remove_raidz',
'remove_indirect', 'remove_attach_mirror']
tags = ['functional', 'removal']
[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['attach_import', 'attach_multiple', 'attach_rebuild',
'attach_resilver', 'detach', 'rebuild_disabled_feature',
'rebuild_multiple', 'rebuild_raidz', 'replace_import', 'replace_rebuild',
'replace_resilver', 'resilver_restart_001', 'resilver_restart_002',
'scrub_cancel']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_007_pos', 'rsend_008_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos',
'rsend_014_pos', 'rsend_016_neg', 'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos',
'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send-L_toggle', 'send_encrypted_hierarchy',
'send_encrypted_props', 'send_encrypted_truncated_files',
'send_freeobjects', 'send_realloc_files',
'send_realloc_encrypted_files', 'send_spill_block', 'send_holds',
'send_hole_birth', 'send_mixed_raw', 'send-wR_encrypted_zvol',
- 'send_partial_dataset', 'send_invalid', 'send_doall']
+ 'send_partial_dataset', 'send_invalid', 'send_doall',
+ 'send_raw_spill_block', 'send_raw_ashift']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs_001',
'slog_replay_fs_002', 'slog_replay_volume']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
'snapshot_017_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
- 'suid_write_to_none']
+ 'suid_write_to_none', 'suid_write_zil_replay']
tags = ['functional', 'suid']
[tests/functional/threadsappend]
tests = ['threadsappend_001_pos']
tags = ['functional', 'threadsappend']
[tests/functional/trim]
tests = ['autotrim_integrity', 'autotrim_config', 'autotrim_trim_integrity',
'trim_integrity', 'trim_config', 'trim_l2arc']
tags = ['functional', 'trim']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/userquota]
tests = [
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
- 'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted']
+ 'userspace_001_pos', 'userspace_002_pos', 'userspace_encrypted',
+ 'userspace_send_encrypted']
tags = ['functional', 'userquota']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos',
'vdev_zaps_007_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_012_pos', 'xattr_013_pos']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
'zvol_misc_snapdev', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_004_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/l2arc]
tests = ['l2arc_arcstats_pos', 'l2arc_mfuonly_pos', 'l2arc_l2miss_pos',
'persist_l2arc_001_pos', 'persist_l2arc_002_pos',
'persist_l2arc_003_neg', 'persist_l2arc_004_pos', 'persist_l2arc_005_pos']
tags = ['functional', 'l2arc']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
diff --git a/sys/contrib/openzfs/tests/runfiles/linux.run b/sys/contrib/openzfs/tests/runfiles/linux.run
index 642ed824d462..c01e1e3c4d53 100644
--- a/sys/contrib/openzfs/tests/runfiles/linux.run
+++ b/sys/contrib/openzfs/tests/runfiles/linux.run
@@ -1,174 +1,186 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/posix:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/acl/posix-sa:Linux]
tests = ['posix_001_pos', 'posix_002_pos', 'posix_003_pos', 'posix_004_pos']
tags = ['functional', 'acl', 'posix-sa']
[tests/functional/atime:Linux]
tests = ['atime_003_pos', 'root_relatime_on']
tags = ['functional', 'atime']
[tests/functional/chattr:Linux]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/checksum:Linux]
tests = ['run_edonr_test']
tags = ['functional', 'checksum']
[tests/functional/cli_root/zfs:Linux]
tests = ['zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_mount:Linux]
tests = ['zfs_mount_006_pos', 'zfs_mount_008_pos', 'zfs_mount_013_pos',
'zfs_mount_014_neg', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_share:Linux]
tests = ['zfs_share_005_pos', 'zfs_share_007_neg', 'zfs_share_009_neg',
'zfs_share_012_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_sysfs:Linux]
tests = ['zfeature_set_unsupported', 'zfs_get_unsupported',
'zfs_set_unsupported', 'zfs_sysfs_live', 'zpool_get_unsupported',
'zpool_set_unsupported']
tags = ['functional', 'cli_root', 'zfs_sysfs']
[tests/functional/cli_root/zpool_add:Linux]
tests = ['add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_expand:Linux]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos', 'zpool_expand_005_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_reopen:Linux]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg', 'zpool_reopen_007_pos']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_split:Linux]
tests = ['zpool_split_wholedisk']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/compression:Linux]
tests = ['compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/devices:Linux]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/events:Linux]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter', 'zed_fd_spill']
tags = ['functional', 'events']
[tests/functional/fallocate:Linux]
-tests = ['fallocate_prealloc', 'fallocate_punch-hole']
+tests = ['fallocate_prealloc']
tags = ['functional', 'fallocate']
[tests/functional/fault:Linux]
tests = ['auto_offline_001_pos', 'auto_online_001_pos', 'auto_online_002_pos',
'auto_replace_001_pos', 'auto_spare_001_pos', 'auto_spare_002_pos',
'auto_spare_multiple', 'auto_spare_ashift', 'auto_spare_shared',
'decrypt_fault', 'decompress_fault', 'scrub_after_resilver',
'zpool_status_-s']
tags = ['functional', 'fault']
[tests/functional/features/large_dnode:Linux]
tests = ['large_dnode_002_pos', 'large_dnode_006_pos', 'large_dnode_008_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/io:Linux]
tests = ['libaio', 'io_uring']
tags = ['functional', 'io']
+[tests/functional/largest_pool:Linux]
+tests = ['largest_pool_001_pos']
+pre =
+post =
+tags = ['functional', 'largest_pool']
+
[tests/functional/mmap:Linux]
tests = ['mmap_libaio_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp:Linux]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history',
'mmp_on_zdb', 'mmp_write_distribution', 'mmp_hostid']
tags = ['functional', 'mmp']
[tests/functional/mount:Linux]
tests = ['umount_unlinked_drain']
tags = ['functional', 'mount']
[tests/functional/pam:Linux]
tests = ['pam_basic', 'pam_nounmount']
tags = ['functional', 'pam']
[tests/functional/procfs:Linux]
tests = ['procfs_list_basic', 'procfs_list_concurrent_readers',
'procfs_list_stale_read', 'pool_state']
tags = ['functional', 'procfs']
[tests/functional/projectquota:Linux]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg']
tags = ['functional', 'projectquota']
[tests/functional/rsend:Linux]
tests = ['send_realloc_dnode_size', 'send_encrypted_files']
tags = ['functional', 'rsend']
+[tests/functional/simd:Linux]
+pre =
+post =
+tests = ['simd_supported']
+tags = ['functional', 'simd']
+
[tests/functional/snapshot:Linux]
tests = ['snapshot_015_pos', 'snapshot_016_pos']
tags = ['functional', 'snapshot']
[tests/functional/tmpfile:Linux]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos',
'tmpfile_stat_mode']
tags = ['functional', 'tmpfile']
[tests/functional/upgrade:Linux]
tests = ['upgrade_projectquota_001_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace:Linux]
tests = ['user_namespace_001']
tags = ['functional', 'user_namespace']
[tests/functional/userquota:Linux]
tests = ['groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos',
'userquota_013_pos', 'userspace_003_pos']
tags = ['functional', 'userquota']
diff --git a/sys/contrib/openzfs/tests/runfiles/sanity.run b/sys/contrib/openzfs/tests/runfiles/sanity.run
index 3f4eb302f1e0..fb39fa54b9d5 100644
--- a/sys/contrib/openzfs/tests/runfiles/sanity.run
+++ b/sys/contrib/openzfs/tests/runfiles/sanity.run
@@ -1,621 +1,622 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# This run file contains a subset of functional tests which exercise
# as much functionality as possible while still executing relatively
# quickly. The included tests should take no more than a few seconds
# each to run at most. This provides a convenient way to sanity test a
# change before committing to a full test run which takes several hours.
#
# Approximate run time: 15 minutes
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 180
post_user = root
post = cleanup
failsafe_user = root
failsafe = callbacks/zfs_failsafe
outputdir = /var/tmp/test_results
tags = ['functional']
[tests/functional/acl/off]
tests = ['posixmode']
tags = ['functional', 'acl']
[tests/functional/alloc_class]
tests = ['alloc_class_003_pos', 'alloc_class_004_pos', 'alloc_class_005_pos',
'alloc_class_006_pos', 'alloc_class_008_pos', 'alloc_class_010_pos',
'alloc_class_011_neg']
tags = ['functional', 'alloc_class']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos', 'arcstats_runtime_tuning']
tags = ['functional', 'arc']
[tests/functional/bootfs]
tests = ['bootfs_004_neg', 'bootfs_007_pos']
tags = ['functional', 'bootfs']
[tests/functional/cache]
tests = ['cache_004_neg', 'cache_005_neg', 'cache_007_neg', 'cache_010_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values', 'sensitive_none_lookup',
'sensitive_none_delete', 'insensitive_none_lookup',
'insensitive_none_delete', 'mixed_none_lookup', 'mixed_none_delete']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.stack_gsub', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.inherit', 'tst.list_bookmarks',
'tst.list_children', 'tst.list_clones', 'tst.list_holds',
'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.set_props', 'tst.snapshot_destroy',
'tst.snapshot_neg', 'tst.snapshot_recursive', 'tst.snapshot_simple',
'tst.bookmark.create', 'tst.bookmark.copy']
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/cli_root/zdb]
tests = ['zdb_003_pos', 'zdb_004_pos', 'zdb_005_pos']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters', 'zfs_change-key_clones']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_encrypted']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_dryrun', 'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_006_neg', 'zfs_destroy_007_neg',
'zfs_destroy_008_pos', 'zfs_destroy_009_pos', 'zfs_destroy_010_pos',
'zfs_destroy_011_pos', 'zfs_destroy_012_pos', 'zfs_destroy_013_neg',
'zfs_destroy_014_pos', 'zfs_destroy_dev_removal',
'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_cliargs', 'zfs_diff_encrypted']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_003_pos', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_003_pos', 'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
- 'zfs_load-key_location', 'zfs_load-key_noop', 'zfs_load-key_recursive']
+ 'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
+ 'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_encrypted', 'zfs_mount_remount',
'zfs_mount_all_fail', 'zfs_mount_all_mountpoints', 'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_004_neg', 'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'zfs_receive_016_pos', 'zfs_receive_from_encrypted',
'zfs_receive_to_encrypted', 'zfs_receive_raw',
'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props']
tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_003_pos', 'zfs_rename_004_neg',
'zfs_rename_005_neg', 'zfs_rename_006_pos', 'zfs_rename_007_pos',
'zfs_rename_008_pos', 'zfs_rename_009_neg', 'zfs_rename_010_neg',
'zfs_rename_011_pos', 'zfs_rename_012_neg', 'zfs_rename_013_pos',
'zfs_rename_encrypted_child', 'zfs_rename_to_encrypted',
'zfs_rename_mountpoint', 'zfs_rename_nounmount']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_encrypted',
'zfs_send_raw']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg',
'zfs_set_003_neg', 'property_alias_001_pos',
'zfs_set_keylocation', 'zfs_set_feature_activation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_007_neg', 'zfs_unmount_008_neg',
'zfs_unmount_009_pos', 'zfs_unmount_unload_keys']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zfs_wait]
tests = ['zfs_wait_deleteq']
tags = ['functional', 'cli_root', 'zfs_wait']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_003_pos', 'zpool_colors']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_006_pos', 'zpool_add_007_neg',
'zpool_add_008_neg', 'zpool_add_009_neg']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_002_neg']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_007_neg',
'zpool_create_008_pos', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg', 'zpool_create_015_neg',
'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos',
'zpool_create_020_pos', 'zpool_create_021_pos', 'zpool_create_022_pos',
'zpool_create_encrypted',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_follow', 'zpool_events_poolname']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos', 'zpool_export_003_neg']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg', 'zpool_get_005_pos']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_003_pos', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_014_pos', 'zpool_import_features_001_pos',
'zpool_import_all_001_pos', 'zpool_import_encrypted']
tags = ['functional', 'cli_root', 'zpool_import']
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported',
'zpool_labelclear_removed', 'zpool_labelclear_valid']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_initialize]
tests = ['zpool_initialize_online_offline']
pre =
tags = ['functional', 'cli_root', 'zpool_initialize']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_resilver]
tests = ['zpool_resilver_bad_args']
tags = ['functional', 'cli_root', 'zpool_resilver']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_003_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_print_repairing',
'zpool_scrub_offline_device', 'zpool_scrub_multiple_copies']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_props', 'zpool_split_vdevs', 'zpool_split_indirect']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos']
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_trim]
tests = ['zpool_trim_attach_detach_add_remove', 'zpool_trim_neg',
'zpool_trim_offline_export_import_online', 'zpool_trim_online_offline',
'zpool_trim_rate_neg', 'zpool_trim_secure', 'zpool_trim_split',
'zpool_trim_start_and_cancel_neg', 'zpool_trim_start_and_cancel_pos']
tags = ['functional', 'zpool_trim']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_003_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_009_neg']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_no_activity', 'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg',
'zpool_remove_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/compression]
tests = ['compress_003_pos']
tags = ['functional', 'compression']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/features/large_dnode]
tests = ['large_dnode_003_pos', 'large_dnode_004_neg',
'large_dnode_005_pos', 'large_dnode_007_neg']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow]
pre =
post =
tests = ['grow_pool_001_pos', 'grow_replicas_001_pos']
tags = ['functional', 'grow']
[tests/functional/history]
tests = ['history_004_pos', 'history_005_neg', 'history_007_pos',
'history_009_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
tests = ['run_hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inuse]
tests = ['inuse_004_pos', 'inuse_005_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/libzfs]
tests = ['many_fds', 'libzfs_input']
tags = ['functional', 'libzfs']
[tests/functional/limits]
tests = ['filesystem_count', 'snapshot_count']
tags = ['functional', 'limits']
[tests/functional/link_count]
tests = ['link_count_root_inode']
tags = ['functional', 'link_count']
[tests/functional/log_spacemap]
tests = ['log_spacemap_import_logs']
pre =
post =
tags = ['functional', 'log_spacemap']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_read_001_pos']
tags = ['functional', 'mmap']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/nopwrite]
tests = ['nopwrite_sync', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/pool_checkpoint]
tests = ['checkpoint_conf_change', 'checkpoint_discard_many',
'checkpoint_removal', 'checkpoint_sm_scale', 'checkpoint_twice']
tags = ['functional', 'pool_checkpoint']
timeout = 1800
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/redacted_send]
tests = ['redacted_compressed', 'redacted_contents', 'redacted_deleted',
'redacted_disabled_feature', 'redacted_incrementals',
'redacted_largeblocks', 'redacted_mixed_recsize', 'redacted_negative',
'redacted_origin', 'redacted_props', 'redacted_resume', 'redacted_size']
tags = ['functional', 'redacted_send']
[tests/functional/raidz]
tests = ['raidz_001_neg']
tags = ['functional', 'raidz']
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg',
'refquota_007_neg']
tags = ['functional', 'refquota']
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_005_pos', 'refreserv_multi_raidz']
tags = ['functional', 'refreserv']
[tests/functional/removal]
pre =
tests = ['removal_all_vdev', 'removal_sanity', 'removal_with_dedup',
'removal_with_ganging', 'removal_with_faulted']
tags = ['functional', 'removal']
[tests/functional/replacement]
tests = ['rebuild_raidz']
tags = ['functional', 'replacement']
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_018_pos',
'reservation_019_pos', 'reservation_020_pos', 'reservation_021_neg',
'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rsend]
tests = ['recv_dedup', 'recv_dedup_encrypted_zvol', 'rsend_001_pos',
'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos',
'rsend_006_pos', 'rsend_009_pos', 'rsend_010_pos', 'rsend_011_pos',
'rsend_014_pos', 'rsend_016_neg', 'send-c_verify_contents',
'send-c_volume', 'send-c_zstreamdump', 'send-c_recv_dedup',
'send-L_toggle', 'send_encrypted_hierarchy', 'send_encrypted_props',
'send_encrypted_truncated_files', 'send_freeobjects', 'send_holds',
'send_mixed_raw', 'send-wR_encrypted_zvol', 'send_partial_dataset',
'send_invalid']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_008_neg', 'slog_009_neg', 'slog_010_neg']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'snapshot_001_pos', 'snapshot_002_pos', 'snapshot_003_pos',
'snapshot_004_pos', 'snapshot_005_pos', 'snapshot_006_pos',
'snapshot_007_pos', 'snapshot_008_pos', 'snapshot_009_pos',
'snapshot_010_pos', 'snapshot_011_pos', 'snapshot_012_pos',
'snapshot_013_pos', 'snapshot_014_pos', 'snapshot_017_pos']
tags = ['functional', 'snapshot']
[tests/functional/snapused]
tests = ['snapused_002_pos', 'snapused_004_pos', 'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/suid]
tests = ['suid_write_to_suid', 'suid_write_to_sgid', 'suid_write_to_suid_sgid',
'suid_write_to_none']
tags = ['functional', 'suid']
[tests/functional/threadsappend]
tests = ['threadsappend_001_pos']
tags = ['functional', 'threadsappend']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_readonly_pool']
tags = ['functional', 'upgrade']
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_003_pos', 'vdev_zaps_004_pos',
'vdev_zaps_005_pos', 'vdev_zaps_006_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg',
'xattr_011_pos', 'xattr_013_pos']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/zpool_influxdb]
tests = ['zpool_influxdb']
tags = ['functional', 'zpool_influxdb']
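
The run files above are plain INI-style configuration: a [DEFAULT] section with runner settings (timeouts, users, pre/post scripts) followed by one section per test group whose tests and tags values are Python-style list literals. As a rough illustration of that layout (not the actual test-runner code), the sketch below reads a run file with Python's configparser; the install path is an assumption and depends on the local layout.

import ast
import configparser

# Hypothetical install path for a run file; adjust to the local layout.
RUNFILE = '/usr/share/zfs/runfiles/sanity.run'

cfg = configparser.ConfigParser(interpolation=None)
cfg.read(RUNFILE)

for section in cfg.sections():
    # 'tests' and 'tags' are stored as Python list literals in the run file.
    tests = ast.literal_eval(cfg.get(section, 'tests'))
    tags = ast.literal_eval(cfg.get(section, 'tags'))
    timeout = cfg.get(section, 'timeout')   # inherited from [DEFAULT] if unset
    print('%-55s %3d tests, timeout %s, tags %s'
          % (section, len(tests), timeout, tags))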
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index c3a679675280..559e98dd07b2 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -1,479 +1,483 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# This script must remain compatible with Python 2.6+ and Python 3.4+.
#
import os
import re
import sys
import argparse
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing, this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require a minimum python version of 3.5 and will be skipped when
# the default system version is too old. There may also be tests which require
# additional python modules be installed, for example python-cffi is required
# by the pyzfs tests.
#
python_reason = 'Python v3.5 or newer required'
python_deps_reason = 'Python modules missing: python-cffi'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require the statx(2) system call on Linux which was first
# introduced in the 4.11 kernel.
#
statx_reason = 'Kernel statx(2) system call required on Linux'
#
# Some tests require that the NFS client and server utilities be installed.
#
share_reason = 'NFS client and server utilities required'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests may be structured in a way that relies on exact knowledge
# of how much free space is available in a pool. These tests cannot be
# made completely reliable because the internal details of how free space
# is managed are not exposed to user space.
#
enospc_reason = 'Exact free space reporting is not guaranteed'
#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'
#
# Some tests require that the DISKS provided support the discard operation.
# Normally this is not an issue because loopback devices are used for DISKS
# and they support discard (TRIM/UNMAP).
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'
+#
+# Some tests on FreeBSD require the fspacectl(2) system call and the
+# truncate(1) utility supporting the -d option. The system call was first
+# introduced in FreeBSD version 1400032.
+#
+fspacectl_reason = 'fspacectl(2) and truncate -d support required'
+
#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"
#
# Some test cases don't have all the requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'
summary = {
'total': float(0),
'passed': float(0),
'logfile': "Could not determine logfile location."
}
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
'casenorm/mixed_none_lookup_ci': ['FAIL', '7633'],
'casenorm/mixed_formd_lookup_ci': ['FAIL', '7633'],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', '6066'],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}
if sys.platform.startswith('freebsd'):
known.update({
+ 'cli_root/zfs_receive/receive-o-x_props_override':
+ ['FAIL', known_reason],
'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
'link_count/link_count_001': ['SKIP', na_reason],
})
elif sys.platform.startswith('linux'):
known.update({
'casenorm/mixed_formd_lookup': ['FAIL', '7633'],
'casenorm/mixed_formd_delete': ['FAIL', '7633'],
'casenorm/sensitive_formd_lookup': ['FAIL', '7633'],
'casenorm/sensitive_formd_delete': ['FAIL', '7633'],
'removal/removal_with_zdb': ['SKIP', known_reason],
})
#
# These tests may occasionally fail or be skipped. We want their failures
# to be reported, but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
maybe = {
'chattr/setup': ['SKIP', exec_reason],
'crtime/crtime_001_pos': ['SKIP', statx_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense':
['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', '5479'],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
'cli_root/zfs_share/setup': ['SKIP', share_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/setup': ['SKIP', share_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', '6145'],
- 'cli_root/zpool_import/import_rewind_config_changed':
- ['FAIL', rewind_reason],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', '6839'],
'cli_root/zpool_initialize/zpool_initialize_import_export':
['FAIL', '11948'],
'cli_root/zpool_labelclear/zpool_labelclear_removed':
['FAIL', known_reason],
'cli_root/zpool_trim/setup': ['SKIP', trim_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', '6141'],
'delegate/setup': ['SKIP', exec_reason],
+ 'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason],
'history/history_004_pos': ['FAIL', '7026'],
'history/history_005_neg': ['FAIL', '6680'],
'history/history_006_neg': ['FAIL', '5657'],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'io/mmap': ['SKIP', fio_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'mmp/mmp_on_uberblocks': ['FAIL', known_reason],
'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
- 'no_space/enospc_002_pos': ['FAIL', enospc_reason],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', '11946'],
'projectquota/setup': ['SKIP', exec_reason],
'redundancy/redundancy_004_neg': ['FAIL', '7290'],
'redundancy/redundancy_draid_spare3': ['SKIP', known_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'reservation/reservation_008_pos': ['FAIL', '7741'],
'reservation/reservation_018_pos': ['FAIL', '5642'],
- 'rsend/rsend_019_pos': ['FAIL', '6086'],
- 'rsend/rsend_020_pos': ['FAIL', '6446'],
- 'rsend/rsend_021_pos': ['FAIL', '6446'],
- 'rsend/rsend_024_pos': ['FAIL', '5665'],
- 'rsend/send-c_volume': ['FAIL', '6087'],
- 'rsend/send_partial_dataset': ['FAIL', known_reason],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapshot/snapshot_009_pos': ['FAIL', '7961'],
'snapshot/snapshot_010_pos': ['FAIL', '7961'],
'snapused/snapused_004_pos': ['FAIL', '5513'],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'threadsappend/threadsappend_001_pos': ['FAIL', '6136'],
'trim/setup': ['SKIP', trim_reason],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'],
'pam/setup': ['SKIP', "pamtester might be not available"],
}
if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
- 'cli_root/zfs_receive/receive-o-x_props_override':
- ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_011_pos': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares':
['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
'inheritance/inherit_001_pos': ['FAIL', '11829'],
'resilver/resilver_restart_001': ['FAIL', known_reason],
- 'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
+ 'pool_checkpoint/checkpoint_big_rewind': ['FAIL', '12622'],
+ 'pool_checkpoint/checkpoint_indirect': ['FAIL', '12623'],
})
elif sys.platform.startswith('linux'):
maybe.update({
- 'alloc_class/alloc_class_009_pos': ['FAIL', known_reason],
- 'alloc_class/alloc_class_010_pos': ['FAIL', known_reason],
- 'alloc_class/alloc_class_011_neg': ['FAIL', known_reason],
- 'alloc_class/alloc_class_012_pos': ['FAIL', known_reason],
- 'alloc_class/alloc_class_013_pos': ['FAIL', '11888'],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
- 'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
- 'cli_root/zpool_expand/zpool_expand_005_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'fault/auto_spare_shared': ['FAIL', '11889'],
'io/io_uring': ['SKIP', 'io_uring support required'],
'limits/filesystem_limit': ['SKIP', known_reason],
'limits/snapshot_limit': ['SKIP', known_reason],
'mmp/mmp_active_import': ['FAIL', known_reason],
'mmp/mmp_exported_import': ['FAIL', known_reason],
'mmp/mmp_inactive_import': ['FAIL', known_reason],
- 'refreserv/refreserv_raidz': ['FAIL', known_reason],
- 'rsend/rsend_007_pos': ['FAIL', known_reason],
- 'rsend/rsend_010_pos': ['FAIL', known_reason],
- 'rsend/rsend_011_pos': ['FAIL', known_reason],
- 'snapshot/rollback_003_pos': ['FAIL', known_reason],
+ 'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', '12621'],
+ 'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
})
# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests which use it.
if os.environ.get('CI') == 'true':
known.update({
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
'fault/auto_offline_001_pos': ['SKIP', ci_reason],
'fault/auto_online_001_pos': ['SKIP', ci_reason],
'fault/auto_online_002_pos': ['SKIP', ci_reason],
'fault/auto_replace_001_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})
maybe.update({
'events/events_002_pos': ['FAIL', '11546'],
})
+elif sys.platform.startswith('linux'):
+ maybe.update({
+ 'alloc_class/alloc_class_009_pos': ['FAIL', known_reason],
+ 'alloc_class/alloc_class_010_pos': ['FAIL', known_reason],
+ 'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
+ 'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
+ 'cli_root/zpool_expand/zpool_expand_005_pos': ['FAIL', known_reason],
+ 'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
+ 'refreserv/refreserv_raidz': ['FAIL', known_reason],
+ 'rsend/rsend_007_pos': ['FAIL', known_reason],
+ 'rsend/rsend_010_pos': ['FAIL', known_reason],
+ 'rsend/rsend_011_pos': ['FAIL', known_reason],
+ 'snapshot/rollback_003_pos': ['FAIL', known_reason],
+ })
def usage(s):
print(s)
sys.exit(1)
def process_results(pathname):
try:
f = open(pathname)
except IOError as e:
print('Error opening file: %s' % e)
sys.exit(1)
prefix = '/zfs-tests/tests/functional/'
pattern = \
r'^Test(?:\s+\(\S+\))?:' + \
r'\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' \
% prefix
pattern_log = r'^\s*Log directory:\s*(\S*)'
d = {}
for line in f.readlines():
m = re.match(pattern, line)
if m and len(m.groups()) == 4:
summary['total'] += 1
if m.group(4) == "PASS":
summary['passed'] += 1
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, line)
if m:
summary['logfile'] = m.group(1)
return d
class ListMaybesAction(argparse.Action):
def __init__(self,
option_strings,
dest="SUPPRESS",
default="SUPPRESS",
help="list flaky tests and exit"):
super(ListMaybesAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
for test in maybe:
print(test)
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze ZTS logs')
parser.add_argument('logfile')
parser.add_argument('--list-maybes', action=ListMaybesAction)
parser.add_argument('--no-maybes', action='store_false', dest='maybes')
args = parser.parse_args()
results = process_results(args.logfile)
if summary['total'] == 0:
print("\n\nNo test results were found.")
print("Log directory: %s" % summary['logfile'])
sys.exit(0)
expected = []
unexpected = []
all_maybes = True
for test in list(results.keys()):
if results[test] == "PASS":
continue
setup = test.replace(os.path.basename(test), "setup")
if results[test] == "SKIP" and test != setup:
if setup in known and known[setup][0] == "SKIP":
continue
if setup in maybe and maybe[setup][0] == "SKIP":
continue
if (test in known and results[test] in known[test][0]):
expected.append(test)
elif test in maybe and results[test] in maybe[test][0]:
if results[test] == 'SKIP' or args.maybes:
expected.append(test)
elif not args.maybes:
unexpected.append(test)
else:
unexpected.append(test)
all_maybes = False
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/openzfs/zfs/issues/'
# Include the reason why the result is expected, given the following:
# 1. Suppress test results which set the "Not applicable" reason.
# 2. Numerical reasons are assumed to be GitHub issue numbers.
# 3. When an entire test group is skipped only report the setup reason.
if test in known:
if known[test][1] == na_reason:
continue
elif known[test][1].isdigit():
expect = issue_url + known[test][1]
else:
expect = known[test][1]
elif test in maybe:
if maybe[test][1].isdigit():
expect = issue_url + maybe[test][1]
else:
expect = maybe[test][1]
elif setup in known and known[setup][0] == "SKIP" and setup != test:
continue
elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
continue
else:
expect = "UNKNOWN REASON"
print(" %s %s (%s)" % (results[test], test, expect))
print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
print(" %s %s (expected %s)" % (results[test], test,
known[test][0]))
print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
print(" %s %s (expected %s)" % (results[test], test, expect))
if len(unexpected) == 0:
sys.exit(0)
elif not args.maybes and all_maybes:
sys.exit(2)
else:
sys.exit(1)
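
The comment block near the top of zts-report.py.in describes the result lines it expects from zfstest, and process_results() turns each of them into a test-name to result mapping using the regular expression shown above. A minimal standalone sketch of that match, run against a made-up result line (the test path and timing are hypothetical):

import re

prefix = '/zfs-tests/tests/functional/'
pattern = (r'^Test(?:\s+\(\S+\))?:'
           r'\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' % prefix)

# Hypothetical line in the format documented at the top of the script.
line = ('Test: /zfs-tests/tests/functional/xattr/xattr_001_pos '
        '(run as root) [00:01] [PASS]')

m = re.match(pattern, line)
if m:
    test, user, elapsed, result = m.groups()
    print(test, user, elapsed, result)   # xattr/xattr_001_pos root 00:01 PASS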
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_seek/mmap_seek.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_seek/mmap_seek.c
index f476e1dba9a4..bb36527aafee 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_seek/mmap_seek.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mmap_seek/mmap_seek.c
@@ -1,147 +1,151 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2021 by Lawrence Livermore National Security, LLC.
*/
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <sys/sysmacros.h>
#include <errno.h>
+#ifdef __linux__
+#include <linux/fs.h>
+#endif
static void
seek_data(int fd, off_t offset, off_t expected)
{
off_t data_offset = lseek(fd, offset, SEEK_DATA);
if (data_offset != expected) {
fprintf(stderr, "lseek(fd, %d, SEEK_DATA) = %d (expected %d)\n",
(int)offset, (int)data_offset, (int)expected);
exit(2);
}
}
static void
seek_hole(int fd, off_t offset, off_t expected)
{
off_t hole_offset = lseek(fd, offset, SEEK_HOLE);
if (hole_offset != expected) {
fprintf(stderr, "lseek(fd, %d, SEEK_HOLE) = %d (expected %d)\n",
(int)offset, (int)hole_offset, (int)expected);
exit(2);
}
}
int
main(int argc, char **argv)
{
char *execname = argv[0];
char *file_path = argv[1];
char *buf = NULL;
int err;
if (argc != 4) {
(void) printf("usage: %s <file name> <file size> "
"<block size>\n", argv[0]);
exit(1);
}
int fd = open(file_path, O_RDWR | O_CREAT, 0666);
if (fd == -1) {
(void) fprintf(stderr, "%s: %s: ", execname, file_path);
perror("open");
exit(2);
}
off_t file_size = atoi(argv[2]);
off_t block_size = atoi(argv[3]);
if (block_size * 2 > file_size) {
(void) fprintf(stderr, "file size must be at least "
"double the block size\n");
exit(2);
}
err = ftruncate(fd, file_size);
if (err == -1) {
perror("ftruncate");
exit(2);
}
if ((buf = mmap(NULL, file_size, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0)) == MAP_FAILED) {
perror("mmap");
exit(2);
}
/* Verify the file is sparse and reports no data. */
seek_data(fd, 0, -1);
/* Verify the file is reported as a hole. */
seek_hole(fd, 0, 0);
/* Verify search beyond end of file is an error. */
seek_data(fd, 2 * file_size, -1);
seek_hole(fd, 2 * file_size, -1);
/* Dirty the first byte. */
memset(buf, 'a', 1);
seek_data(fd, 0, 0);
seek_data(fd, block_size, -1);
seek_hole(fd, 0, block_size);
seek_hole(fd, block_size, block_size);
/* Dirty the first half of the file. */
memset(buf, 'b', file_size / 2);
seek_data(fd, 0, 0);
seek_data(fd, block_size, block_size);
seek_hole(fd, 0, P2ROUNDUP(file_size / 2, block_size));
seek_hole(fd, block_size, P2ROUNDUP(file_size / 2, block_size));
/* Dirty the whole file. */
memset(buf, 'c', file_size);
seek_data(fd, 0, 0);
seek_data(fd, file_size * 3 / 4,
P2ROUNDUP(file_size * 3 / 4, block_size));
seek_hole(fd, 0, file_size);
seek_hole(fd, file_size / 2, file_size);
/* Punch a hole (requires compression to be enabled). */
memset(buf + block_size, 0, block_size);
seek_data(fd, 0, 0);
seek_data(fd, block_size, 2 * block_size);
seek_hole(fd, 0, block_size);
seek_hole(fd, block_size, block_size);
seek_hole(fd, 2 * block_size, file_size);
err = munmap(buf, file_size);
if (err == -1) {
perror("munmap");
exit(2);
}
close(fd);
return (0);
}
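
mmap_seek.c exercises lseek(2) with SEEK_DATA/SEEK_HOLE against a sparse, mmap-dirtied file. For a quick interactive check of the same hole-reporting behaviour, a rough Python sketch is below; it assumes a platform and filesystem where os.SEEK_DATA/os.SEEK_HOLE are available, uses a hypothetical scratch path, and probes after a plain write rather than an mmap store.

import os

path = '/tmp/sparse_probe'                 # hypothetical scratch file
fd = os.open(path, os.O_RDWR | os.O_CREAT, 0o666)
os.ftruncate(fd, 1 << 20)                  # 1 MiB file, initially all hole
os.pwrite(fd, b'a', 0)                     # dirty the first byte
os.fsync(fd)

print(os.lseek(fd, 0, os.SEEK_DATA))       # expected: 0, data at the start
print(os.lseek(fd, 0, os.SEEK_HOLE))       # expected: offset of the first hole

os.close(fd)
os.unlink(path)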
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/blkdev.shlib b/sys/contrib/openzfs/tests/zfs-tests/include/blkdev.shlib
index bcba8ee759c9..bf70952904bb 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/blkdev.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/blkdev.shlib
@@ -1,654 +1,654 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2019 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
# Copyright (c) 2016, 2017 by Intel Corporation. All rights reserved.
# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
# Copyright (c) 2017 Datto Inc.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
# Copyright 2019 Richard Elling
#
#
# Returns SCSI host number for the given disk
#
function get_scsi_host #disk
{
typeset disk=$1
ls /sys/block/${disk}/device/scsi_device | cut -d : -f 1
}
#
# Cause a scan of all scsi host adapters by default
#
# $1 optional host number
#
function scan_scsi_hosts
{
typeset hostnum=${1}
if is_linux; then
if [[ -z $hostnum ]]; then
for host in /sys/class/scsi_host/host*; do
log_must eval "echo '- - -' > $host/scan"
done
else
log_must eval \
"echo /sys/class/scsi_host/host$hostnum/scan" \
> /dev/null
log_must eval \
"echo '- - -' > /sys/class/scsi_host/host$hostnum/scan"
fi
fi
}
#
# Wait for newly created block devices to have their minors created.
# Additional arguments can be passed to udevadm trigger, with the expected
# argument typically being a block device pathname. This is useful when
# waiting on a specific device to settle rather than triggering
# all devices and waiting for them all to settle.
#
# The udevadm settle timeout can be 120 or 180 seconds by default for
# some distros. If a long delay is experienced, it could be due to some
# strangeness in a malfunctioning device that isn't related to the devices
# under test. To help debug this condition, a notice is given if settle takes
# too long.
#
# Note: there is no meaningful return code if udevadm fails. Consumers
# should not expect a return code (do not call as argument to log_must)
#
function block_device_wait
{
if is_linux; then
- udevadm trigger $*
+ udevadm trigger $* 2>/dev/null
typeset start=$SECONDS
udevadm settle
typeset elapsed=$((SECONDS - start))
[[ $elapsed > 60 ]] && \
log_note udevadm settle time too long: $elapsed
elif is_freebsd; then
if [[ ${#@} -eq 0 ]]; then
# Do something that has to go through the geom event
# queue to complete.
sysctl kern.geom.conftxt >/dev/null
return
fi
fi
# Poll for the given paths to appear, but give up eventually.
typeset -i i
for (( i = 0; i < 5; ++i )); do
typeset missing=false
typeset dev
for dev in "${@}"; do
if ! [[ -e $dev ]]; then
missing=true
break
fi
done
if ! $missing; then
break
fi
sleep ${#@}
done
}
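
The fallback at the end of block_device_wait is just a bounded poll for the requested device nodes. The same pattern in Python, as a rough illustration only (the shell function above remains what the tests use):

import os
import time

def wait_for_paths(paths, attempts=5):
    # Poll for the given device nodes to appear, giving up eventually,
    # mirroring the retry loop at the end of block_device_wait.
    for _ in range(attempts):
        if all(os.path.exists(p) for p in paths):
            return True
        time.sleep(max(len(paths), 1))
    return all(os.path.exists(p) for p in paths)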
#
# Check if the given device is a physical device
#
function is_physical_device #device
{
typeset device=${1#$DEV_DSKDIR/}
device=${device#$DEV_RDSKDIR/}
if is_linux; then
is_disk_device "$DEV_DSKDIR/$device" && \
[[ -f /sys/module/loop/parameters/max_part ]]
return $?
elif is_freebsd; then
is_disk_device "$DEV_DSKDIR/$device" && \
echo $device | egrep -q \
-e '^a?da[0-9]+$' \
-e '^md[0-9]+$' \
-e '^mfid[0-9]+$' \
-e '^nda[0-9]+$' \
-e '^nvd[0-9]+$' \
-e '^vtbd[0-9]+$'
return $?
else
echo $device | egrep "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
return $?
fi
}
#
# Check if the given device is a real device (i.e., a SCSI device)
#
function is_real_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
egrep disk >/dev/null
return $?
fi
}
#
# Check if the given device is a loop device
#
function is_loop_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
lsblk $DEV_RDSKDIR/$disk -o TYPE 2>/dev/null | \
egrep loop >/dev/null
return $?
fi
}
#
# Linux:
# Check if the given device is a multipath device and if there is a symbolic
# link to a device mapper and to a disk
# Currently no support for dm devices alone without multipath
#
# FreeBSD:
# Check if the given device is a gmultipath device.
#
# Others:
# No multipath detection.
#
function is_mpath_device #disk
{
typeset disk=$1
[[ -z $disk ]] && log_fail "No argument for disk given."
if is_linux; then
lsblk $DEV_MPATHDIR/$disk -o TYPE 2>/dev/null | \
egrep mpath >/dev/null
if (($? == 0)); then
readlink $DEV_MPATHDIR/$disk > /dev/null 2>&1
return $?
else
return $?
fi
elif is_freebsd; then
is_disk_device $DEV_MPATHDIR/$disk
else
false
fi
}
#
# Check if the given path is the appropriate sort of device special node.
#
function is_disk_device #path
{
typeset path=$1
if is_freebsd; then
# FreeBSD doesn't have block devices, only character devices.
test -c $path
else
test -b $path
fi
}
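
is_disk_device captures a platform difference worth keeping in mind: FreeBSD exposes disks as character devices, while Linux uses block devices. A purely illustrative Python equivalent:

import os
import stat

def is_disk_device(path, freebsd=False):
    # FreeBSD exposes disks as character devices; Linux uses block devices.
    mode = os.stat(path).st_mode
    return stat.S_ISCHR(mode) if freebsd else stat.S_ISBLK(mode)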
# Set the slice prefix for disk partitioning depending
# on whether the device is a real, multipath, or loop device.
# Currently all disks have to be of the same type, so it only
# checks the first disk to determine the slice prefix.
#
function set_slice_prefix
{
typeset disk
typeset -i i=0
if is_linux; then
while (( i < $DISK_ARRAY_NUM )); do
disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
if ( is_mpath_device $disk ) && [[ -z $(echo $disk | awk 'substr($1,18,1)\
~ /^[[:digit:]]+$/') ]] || ( is_real_device $disk ); then
export SLICE_PREFIX=""
return 0
elif ( is_mpath_device $disk || is_loop_device \
$disk ); then
export SLICE_PREFIX="p"
return 0
else
log_fail "$disk not supported for partitioning."
fi
(( i = i + 1))
done
fi
}
#
# Set the directory path of the listed devices in $DISK_ARRAY_NUM
# Currently all disks have to be of the same type, so it only
# checks the first disk to determine the device directory
# default = /dev (linux)
# real disk = /dev (linux)
# multipath device = /dev/mapper (linux)
#
function set_device_dir
{
typeset disk
typeset -i i=0
if is_linux; then
while (( i < $DISK_ARRAY_NUM )); do
disk="$(echo $DISKS | nawk '{print $(i + 1)}')"
if is_mpath_device $disk; then
export DEV_DSKDIR=$DEV_MPATHDIR
return 0
else
export DEV_DSKDIR=$DEV_RDSKDIR
return 0
fi
(( i = i + 1))
done
else
export DEV_DSKDIR=$DEV_RDSKDIR
fi
}
#
# Get the directory path of given device
#
function get_device_dir #device
{
typeset device=$1
if ! is_freebsd && ! is_physical_device $device; then
if [[ $device != "/" ]]; then
device=${device%/*}
fi
if is_disk_device "$DEV_DSKDIR/$device"; then
device="$DEV_DSKDIR"
fi
echo $device
else
echo "$DEV_DSKDIR"
fi
}
#
# Get persistent name for given disk
#
function get_persistent_disk_name #device
{
typeset device=$1
typeset dev_id
if is_linux; then
if is_real_device $device; then
dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
| egrep disk/by-id | nawk '{print $2; exit}' \
| nawk -F / '{print $3}')"
echo $dev_id
elif is_mpath_device $device; then
dev_id="$(udevadm info -q all -n $DEV_DSKDIR/$device \
| egrep disk/by-id/dm-uuid \
| nawk '{print $2; exit}' \
| nawk -F / '{print $3}')"
echo $dev_id
else
echo $device
fi
else
echo $device
fi
}
#
# Online or offline a disk on the system
#
# First checks state of disk. Test will fail if disk is not properly onlined
# or offlined. Online is a full rescan of SCSI disks by echoing to every
# host entry.
#
function on_off_disk # disk state{online,offline} host
{
typeset disk=$1
typeset state=$2
typeset host=$3
[[ -z $disk ]] || [[ -z $state ]] && \
log_fail "Arguments invalid or missing"
if is_linux; then
if [[ $state == "offline" ]] && ( is_mpath_device $disk ); then
dm_name="$(readlink $DEV_DSKDIR/$disk \
| nawk -F / '{print $2}')"
dep="$(ls /sys/block/${dm_name}/slaves \
| nawk '{print $1}')"
while [[ -n $dep ]]; do
#check if disk is online
lsscsi | egrep $dep > /dev/null
if (($? == 0)); then
dep_dir="/sys/block/${dm_name}"
dep_dir+="/slaves/${dep}/device"
ss="${dep_dir}/state"
sd="${dep_dir}/delete"
log_must eval "echo 'offline' > ${ss}"
log_must eval "echo '1' > ${sd}"
lsscsi | egrep $dep > /dev/null
if (($? == 0)); then
log_fail "Offlining" \
"$disk failed"
fi
fi
dep="$(ls /sys/block/$dm_name/slaves \
2>/dev/null | nawk '{print $1}')"
done
elif [[ $state == "offline" ]] && ( is_real_device $disk ); then
#check if disk is online
lsscsi | egrep $disk > /dev/null
if (($? == 0)); then
dev_state="/sys/block/$disk/device/state"
dev_delete="/sys/block/$disk/device/delete"
log_must eval "echo 'offline' > ${dev_state}"
log_must eval "echo '1' > ${dev_delete}"
lsscsi | egrep $disk > /dev/null
if (($? == 0)); then
log_fail "Offlining $disk" \
"failed"
fi
else
log_note "$disk is already offline"
fi
elif [[ $state == "online" ]]; then
#force a full rescan
scan_scsi_hosts $host
block_device_wait
if is_mpath_device $disk; then
dm_name="$(readlink $DEV_DSKDIR/$disk \
| nawk -F / '{print $2}')"
dep="$(ls /sys/block/$dm_name/slaves \
| nawk '{print $1}')"
lsscsi | egrep $dep > /dev/null
if (($? != 0)); then
log_fail "Onlining $disk failed"
fi
elif is_real_device $disk; then
block_device_wait
typeset -i retries=0
while ! lsscsi | egrep -q $disk; do
if (( $retries > 2 )); then
log_fail "Onlining $disk failed"
break
fi
(( ++retries ))
sleep 1
done
else
log_fail "$disk is not a real dev"
fi
else
log_fail "$disk failed to $state"
fi
fi
}
#
# Simulate disk removal
#
function remove_disk #disk
{
typeset disk=$1
on_off_disk $disk "offline"
block_device_wait
}
#
# Simulate disk insertion for the given SCSI host
#
function insert_disk #disk scsi_host
{
typeset disk=$1
typeset scsi_host=$2
on_off_disk $disk "online" $scsi_host
block_device_wait
}
#
# Load scsi_debug module with specified parameters
# $blksz can be either one of: < 512b | 512e | 4Kn >
#
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns blksz
{
typeset devsize=$1
typeset hosts=$2
typeset tgts=$3
typeset luns=$4
typeset blksz=$5
[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
[[ -z $luns ]] || [[ -z $blksz ]] && \
log_fail "Arguments invalid or missing"
case "$5" in
'512b')
typeset sector=512
typeset blkexp=0
;;
'512e')
typeset sector=512
typeset blkexp=3
;;
'4Kn')
typeset sector=4096
typeset blkexp=0
;;
*) log_fail "Unsupported blksz value: $5" ;;
esac
if is_linux; then
modprobe -n scsi_debug
if (($? != 0)); then
log_unsupported "Platform does not have scsi_debug"
"module"
fi
lsmod | egrep scsi_debug > /dev/null
if (($? == 0)); then
log_fail "scsi_debug module already installed"
else
log_must modprobe scsi_debug dev_size_mb=$devsize \
add_host=$hosts num_tgts=$tgts max_luns=$luns \
sector_size=$sector physblk_exp=$blkexp
block_device_wait
lsscsi | egrep scsi_debug > /dev/null
if (($? == 1)); then
log_fail "scsi_debug module install failed"
fi
fi
fi
}
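#
# Example usage (an illustrative sketch; the sizes and counts are arbitrary):
# create a 256 MB scsi_debug device with one host, target and LUN using 512e
# sectors, look up its device name, and unload the module when finished.
#
# load_scsi_debug 256 1 1 1 '512e'
# typeset sd=$(get_debug_device)
# ... use $DEV_DSKDIR/$sd ...
# unload_scsi_debug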
#
# Unload scsi_debug module, if needed.
#
function unload_scsi_debug
{
log_must_retry "in use" 5 modprobe -r scsi_debug
}
#
# Get scsi_debug device name.
# Returns basename of scsi_debug device (for example "sdb").
#
function get_debug_device
{
for i in {1..10} ; do
val=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}' | cut -d / -f3)
# lsscsi can take time to settle
if [ "$val" != "-" ] ; then
break
fi
sleep 1
done
echo "$val"
}
#
# Get actual devices used by the pool (i.e. linux sdb1 not sdb).
#
function get_pool_devices #testpool #devdir
{
typeset testpool=$1
typeset devdir=$2
typeset out=""
if is_linux || is_freebsd; then
out=$(zpool status -P $testpool |grep ${devdir} | awk '{print $1}')
out=$(echo $out | sed -e "s|${devdir}/||g" | tr '\n' ' ')
fi
echo $out
}
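#
# Example usage (an illustrative sketch): capture the leaf devices backing
# $TESTPOOL under $DEV_RDSKDIR so each one can be inspected individually.
#
# typeset vdevs=$(get_pool_devices $TESTPOOL $DEV_RDSKDIR)
# for dev in $vdevs; do log_note "pool uses $dev"; done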
#
# Write to standard out giving the level, device name, offset and length
# of all blocks in an input file. The offset and length are in units of
# 512 byte blocks. In the case of mirrored vdevs, only the first
# device is listed, as the levels, blocks and offsets will be the same
# on other devices. Note that this function only works with mirrored
# or non-redundant pools, not raidz.
#
# The output of this function can be used to introduce corruption at
# varying levels of indirection.
#
function list_file_blocks # input_file
{
typeset input_file=$1
[[ -f $input_file ]] || log_fail "Couldn't find $input_file"
typeset ds="$(zfs list -H -o name $input_file)"
typeset pool="${ds%%/*}"
typeset objnum="$(get_objnum $input_file)"
#
# Establish a mapping between vdev ids as shown in a DVA and the
# pathnames they correspond to in ${VDEV_MAP[][]}.
#
# The vdev bits in a DVA refer to the top level vdev id.
# ${VDEV_MAP[$id]} is an array of the vdev paths within that vdev.
#
eval $(zdb -C $pool | awk '
BEGIN { printf "typeset -a VDEV_MAP;" }
function subscript(s) {
# "[#]" is more convenient than the bare "#"
match(s, /\[[0-9]*\]/)
return substr(s, RSTART, RLENGTH)
}
id && !/^ / {
# left a top level vdev
id = 0
}
id && $1 ~ /^path:$/ {
# found a vdev path; save it in the map
printf "VDEV_MAP%s%s=%s;", id, child, $2
}
/^ children/ {
# entering a top level vdev
id = subscript($0)
child = "[0]" # default in case there is no nested vdev
printf "typeset -a VDEV_MAP%s;", id
}
/^ children/ {
# entering a nested vdev (e.g. child of a top level mirror)
child = subscript($0)
}
')
#
# The awk below parses the output of zdb, printing out the level
# of each block along with vdev id, offset and length. The last
# two are converted to decimal in the while loop. 4M is added to
# the offset to compensate for the first two labels and boot
# block. Lastly, the offset and length are printed in units of
# 512B blocks for ease of use with dd.
#
typeset level vdev path offset length
if awk -n '' 2>/dev/null; then
# gawk needs -n to decode hex
AWK='awk -n'
else
AWK='awk'
fi
log_must zpool sync -f
zdb -dddddd $ds $objnum | $AWK -v pad=$((4<<20)) -v bs=512 '
/^$/ { looking = 0 }
looking {
level = $2
field = 3
while (split($field, dva, ":") == 3) {
# top level vdev id
vdev = int(dva[1])
# offset + 4M label/boot pad in 512B blocks
offset = (int("0x"dva[2]) + pad) / bs
# length in 512B blocks
len = int("0x"dva[3]) / bs
print level, vdev, offset, len
++field
}
}
/^Indirect blocks:/ { looking = 1 }
' | \
while read level vdev offset length; do
for path in ${VDEV_MAP[$vdev][@]}; do
echo "$level $path $offset $length"
done
done 2>/dev/null
}
function corrupt_blocks_at_level # input_file corrupt_level
{
typeset input_file=$1
typeset corrupt_level="L${2:-0}"
typeset level path offset length
[[ -f $input_file ]] || log_fail "Couldn't find $input_file"
if is_freebsd; then
# Temporarily allow corrupting an inuse device.
debugflags=$(sysctl -n kern.geom.debugflags)
sysctl kern.geom.debugflags=16
fi
list_file_blocks $input_file | \
while read level path offset length; do
if [[ $level = $corrupt_level ]]; then
log_must dd if=/dev/urandom of=$path bs=512 \
count=$length seek=$offset conv=notrunc
fi
done
if is_freebsd; then
sysctl kern.geom.debugflags=$debugflags
fi
# This is necessary for pools made of loop devices.
sync
}
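#
# Example usage (an illustrative sketch; the file name is hypothetical):
# corrupt the level-1 indirect blocks of a test file, then scrub the pool
# to exercise the repair/reporting path.
#
# corrupt_blocks_at_level $TESTDIR/file.bin 1
# log_must zpool scrub $TESTPOOL
#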
diff --git a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
index 9391c050b776..dd43b02a6868 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/include/libtest.shlib
@@ -1,4278 +1,4296 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2009, Sun Microsystems Inc. All rights reserved.
# Copyright (c) 2012, 2020, Delphix. All rights reserved.
# Copyright (c) 2017, Tim Chase. All rights reserved.
# Copyright (c) 2017, Nexenta Systems Inc. All rights reserved.
# Copyright (c) 2017, Lawrence Livermore National Security LLC.
# Copyright (c) 2017, Datto Inc. All rights reserved.
# Copyright (c) 2017, Open-E Inc. All rights reserved.
+# Copyright (c) 2021, The FreeBSD Foundation.
# Use is subject to license terms.
#
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
. ${STF_SUITE}/include/tunables.cfg
#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
export PATH="$STF_PATH"
fi
#
# Generic dot version comparison function
#
# Returns success when version $1 is greater than or equal to $2.
#
function compare_version_gte
{
if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then
return 0
else
return 1
fi
}
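#
# Example usage (an illustrative sketch; the version strings are arbitrary):
#
# compare_version_gte "2.1.5" "2.1" && log_note "2.1.5 >= 2.1"
#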
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
typeset ver="$1"
[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
typeset version=$(echo $ver | cut -d '.' -f 1)
typeset major=$(echo $ver | cut -d '.' -f 2)
typeset minor=$(echo $ver | cut -d '.' -f 3)
[[ -z "$version" ]] && version=0
[[ -z "$major" ]] && major=0
[[ -z "$minor" ]] && minor=0
echo $((version * 10000 + major * 100 + minor))
}
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
function is_linux
{
if [[ $(uname -o) == "GNU/Linux" ]]; then
return 0
else
return 1
fi
}
# Determine if this is an illumos test system
#
# Return 0 if platform illumos, 1 if otherwise
function is_illumos
{
if [[ $(uname -o) == "illumos" ]]; then
return 0
else
return 1
fi
}
# Determine if this is a FreeBSD test system
#
# Return 0 if platform FreeBSD, 1 if otherwise
function is_freebsd
{
if [[ $(uname -o) == "FreeBSD" ]]; then
return 0
else
return 1
fi
}
# Determine if this is a DilOS test system
#
# Return 0 if platform DilOS, 1 if otherwise
function is_dilos
{
typeset ID=""
[[ -f /etc/os-release ]] && . /etc/os-release
if [[ $ID == "dilos" ]]; then
return 0
else
return 1
fi
}
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise
function is_32bit
{
if [[ $(getconf LONG_BIT) == "32" ]]; then
return 0
else
return 1
fi
}
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise
function is_kmemleak
{
if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
return 0
else
return 1
fi
}
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
typeset fstype=$2
[[ -z $fstype ]] && fstype=zfs
typeset out dir name ret
case $fstype in
zfs)
if [[ "$1" == "/"* ]] ; then
for out in $(zfs mount | awk '{print $2}'); do
[[ $1 == $out ]] && return 0
done
else
for out in $(zfs mount | awk '{print $1}'); do
[[ $1 == $out ]] && return 0
done
fi
;;
ufs|nfs)
if is_freebsd; then
mount -pt $fstype | while read dev dir _t _flags; do
[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
done
else
out=$(df -F $fstype $1 2>/dev/null)
ret=$?
(($ret != 0)) && return $ret
dir=${out%%\(*}
dir=${dir%% *}
name=${out##*\(}
name=${name%%\)*}
name=${name%% *}
[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
fi
;;
ext*)
out=$(df -t $fstype $1 2>/dev/null)
return $?
;;
zvol)
if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
link=$(readlink -f $ZVOL_DEVDIR/$1)
[[ -n "$link" ]] && \
mount | grep -q "^$link" && \
return 0
fi
;;
esac
return 1
}
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
ismounted $1 $2
(($? == 0)) && return 0
return 1
}
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
ismounted $1 $2
(($? == 1)) && return 0
return 1
}
# split line on ","
#
# $1 - line to split
function splitline
{
- echo $1 | sed "s/,/ /g"
+ echo $1 | tr ',' ' '
}
function default_setup
{
default_setup_noexit "$@"
log_pass
}
function default_setup_no_mountpoint
{
default_setup_noexit "$1" "$2" "$3" "yes"
log_pass
}
#
# Given a list of disks, setup storage pools and datasets.
#
function default_setup_noexit
{
typeset disklist=$1
typeset container=$2
typeset volume=$3
typeset no_mountpoint=$4
log_note begin default_setup_noexit
if is_global_zone; then
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL $disklist
else
reexport_pool
fi
rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
log_must zfs create $TESTPOOL/$TESTFS
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
fi
if [[ -n $container ]]; then
rm -rf $TESTDIR1 || \
log_unresolved Could not remove $TESTDIR1
mkdir -p $TESTDIR1 || \
log_unresolved Could not create $TESTDIR1
log_must zfs create $TESTPOOL/$TESTCTR
log_must zfs set canmount=off $TESTPOOL/$TESTCTR
log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
if [[ -z $no_mountpoint ]]; then
log_must zfs set mountpoint=$TESTDIR1 \
$TESTPOOL/$TESTCTR/$TESTFS1
fi
fi
if [[ -n $volume ]]; then
if is_global_zone ; then
log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
block_device_wait
else
log_must zfs create $TESTPOOL/$TESTVOL
fi
fi
}
#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
typeset disklist=$1
default_setup "$disklist" "true"
}
#
# Given a list of disks, setup a storage pool,file system
# and a volume.
#
function default_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "" "true"
}
#
# Given a list of disks, setup a storage pool,file system,
# a container and a volume.
#
function default_container_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "true" "true"
}
#
# Create a snapshot on a filesystem or volume. By default, create a snapshot
# on the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
typeset snap=${2:-$TESTSNAP}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
if snapexists $fs_vol@$snap; then
log_fail "$fs_vol@$snap already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
log_must zfs snapshot $fs_vol@$snap
}
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
typeset clone=${2:-$TESTPOOL/$TESTCLONE}
[[ -z $snap ]] && \
log_fail "Snapshot name is undefined."
[[ -z $clone ]] && \
log_fail "Clone name is undefined."
log_must zfs clone $snap $clone
}
#
# Create a bookmark of the given snapshot. By default, create a bookmark on
# the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
typeset fs_vol=${1:-$TESTFS}
typeset snap=${2:-$TESTSNAP}
typeset bkmark=${3:-$TESTBKMARK}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
if bkmarkexists $fs_vol#$bkmark; then
log_fail "$fs_vol#$bkmark already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
snapexists $fs_vol@$snap || \
log_fail "$fs_vol@$snap must exist."
log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
# of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
typeset recvfs="$1"
typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
typeset snap="$sendfs@snap1"
typeset incr="$sendfs@snap2"
typeset mountpoint="$TESTDIR/create_recv_clone"
typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
datasetexists $recvfs && log_fail "Recv filesystem must not exist."
datasetexists $sendfs && log_fail "Send filesystem must not exist."
log_must zfs create -o mountpoint="$mountpoint" $sendfs
log_must zfs snapshot $snap
log_must eval "zfs send $snap | zfs recv -u $recvfs"
log_must mkfile 1m "$mountpoint/data"
log_must zfs snapshot $incr
log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
iflag=fullblock > $sendfile"
log_mustnot eval "zfs recv -su $recvfs < $sendfile"
destroy_dataset "$sendfs" "-r"
log_must rm -f "$sendfile"
if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
log_fail "Error creating temporary $recvfs/%recv clone"
fi
}
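#
# Example usage (an illustrative sketch; the dataset name is hypothetical):
# leave a partially received stream behind so a test can exercise aborting
# it with 'zfs receive -A'.
#
# create_recv_clone $TESTPOOL/recvfs
# log_must zfs receive -A $TESTPOOL/recvfs
#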
function default_mirror_setup
{
default_mirror_setup_noexit $1 $2 $3
log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
# $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
readonly func="default_mirror_setup_noexit"
typeset primary=$1
typeset secondary=$2
[[ -z $primary ]] && \
log_fail "$func: No parameters passed"
[[ -z $secondary ]] && \
log_fail "$func: No secondary partition passed"
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL mirror $@
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Create a number of mirrors.
# We create a number ($1) of 2-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
# @parameters: $1 the number of mirrors to create
# $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
typeset -i nmirrors=$1
shift
while ((nmirrors > 0)); do
log_must test -n "$1" -a -n "$2"
[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
shift 2
((nmirrors = nmirrors - 1))
done
}
#
# Create a number of raidz pools.
# We create a number ($1) of 2-disk raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted.
# @parameters: $1 the number of pools to create
# $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
typeset -i nraidzs=$1
shift
while ((nraidzs > 0)); do
log_must test -n "$1" -a -n "$2"
[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
shift 2
((nraidzs = nraidzs - 1))
done
}
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
default_cleanup_noexit
log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
typeset disklist="$*"
disks=(${disklist[*]})
if [[ ${#disks[*]} -lt 2 ]]; then
log_fail "A raid-z requires a minimum of two disks."
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL raidz $disklist
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_pass
}
#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
function default_cleanup
{
default_cleanup_noexit
log_pass
}
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
function default_cleanup_noexit
{
typeset pool=""
#
# Destroying the pool will also destroy any
# filesystems it contains.
#
if is_global_zone; then
zfs unmount -a > /dev/null 2>&1
ALL_POOLS=$(get_all_pools)
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
while [ ! -z ${ALL_POOLS} ]
do
for pool in ${ALL_POOLS}
do
if safe_to_destroy_pool $pool ;
then
destroy_pool $pool
fi
done
ALL_POOLS=$(get_all_pools)
done
zfs mount -a
else
typeset fs=""
for fs in $(zfs list -H -o name \
| grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
destroy_dataset "$fs" "-Rf"
done
# Clean up here to avoid leaving garbage directories behind.
for fs in $(zfs list -H -o name); do
[[ $fs == /$ZONE_POOL ]] && continue
[[ -d $fs ]] && log_must rm -rf $fs/*
done
#
# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
# the default value
#
for fs in $(zfs list -H -o name); do
if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
log_must zfs set reservation=none $fs
log_must zfs set recordsize=128K $fs
log_must zfs set mountpoint=/$fs $fs
typeset enc=""
enc=$(get_prop encryption $fs)
if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
[[ "$enc" == "off" ]]; then
log_must zfs set checksum=on $fs
fi
log_must zfs set compression=off $fs
log_must zfs set atime=on $fs
log_must zfs set devices=off $fs
log_must zfs set exec=on $fs
log_must zfs set setuid=on $fs
log_must zfs set readonly=off $fs
log_must zfs set snapdir=hidden $fs
log_must zfs set aclmode=groupmask $fs
log_must zfs set aclinherit=secure $fs
fi
done
fi
[[ -d $TESTDIR ]] && \
log_must rm -rf $TESTDIR
disk1=${DISKS%% *}
if is_mpath_device $disk1; then
delete_partitions
fi
rm -f $TEST_BASE_DIR/{err,out}
}
#
# Common function used to cleanup storage pools, file systems
# and containers.
#
function default_container_cleanup
{
if ! is_global_zone; then
reexport_pool
fi
ismounted $TESTPOOL/$TESTCTR/$TESTFS1
[[ $? -eq 0 ]] && \
log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
[[ -e $TESTDIR1 ]] && \
log_must rm -rf $TESTDIR1 > /dev/null 2>&1
default_cleanup
}
#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if ! snapexists $snap; then
log_fail "'$snap' does not exist."
fi
#
# The value returned by 'get_prop' may not match the real mountpoint
# when the snapshot is unmounted, so first check and make sure this
# snapshot is mounted on the current system.
#
typeset mtpt=""
if ismounted $snap; then
mtpt=$(get_prop mountpoint $snap)
(($? != 0)) && \
log_fail "get_prop mountpoint $snap failed."
fi
destroy_dataset "$snap"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
typeset clone=${1:-$TESTPOOL/$TESTCLONE}
if ! datasetexists $clone; then
log_fail "'$clone' does not existed."
fi
# For the same reason as in destroy_snapshot
typeset mtpt=""
if ismounted $clone; then
mtpt=$(get_prop mountpoint $clone)
(($? != 0)) && \
log_fail "get_prop mountpoint $clone failed."
fi
destroy_dataset "$clone"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to cleanup bookmark of file system or volume. Default
# to delete the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
if ! bkmarkexists $bkmark; then
log_fail "'$bkmarkp' does not existed."
fi
destroy_dataset "$bkmark"
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
function snapexists
{
zfs list -H -t snapshot "$1" > /dev/null 2>&1
return $?
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
zfs list -H -t bookmark "$1" > /dev/null 2>&1
return $?
}
#
# Return 0 if a hold exists; $? otherwise
#
# $1 - hold tag
# $2 - snapshot name
#
function holdexists
{
zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
return $?
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
# $1 dataset whose property is being set
# $2 property to set
# $3 value to set property to
# @return:
# 0 if the property could be set.
# non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
typeset fn=dataset_setprop
if (($# < 3)); then
log_note "$fn: Insufficient parameters (need 3, had $#)"
return 1
fi
typeset output=
output=$(zfs set $2=$3 $1 2>&1)
typeset rv=$?
if ((rv != 0)); then
log_note "Setting property on $1 failed."
log_note "property $2=$3"
log_note "Return Code: $rv"
log_note "Output: $output"
return $rv
fi
return 0
}
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
# 0 if the dataset has been altered.
# 1 if no pool name was passed in.
# 2 if the dataset could not be found.
# 3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
typeset dataset="$1"
[[ -z $dataset ]] && return 1
typeset confset=
typeset -i found=0
for confset in $(zfs list); do
if [[ $dataset = $confset ]]; then
found=1
break
fi
done
[[ $found -eq 0 ]] && return 2
if [[ -n $COMPRESSION_PROP ]]; then
dataset_setprop $dataset compression $COMPRESSION_PROP || \
return 3
log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
fi
if [[ -n $CHECKSUM_PROP ]]; then
dataset_setprop $dataset checksum $CHECKSUM_PROP || \
return 3
log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
fi
return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
(($@)) || log_fail "$@"
}
#
# Function to format partition size of a disk
# Given a disk cxtxdx reduces all partitions
# to 0 size
#
function zero_partitions #<whole_disk_name>
{
typeset diskname=$1
typeset i
if is_freebsd; then
gpart destroy -F $diskname
elif is_linux; then
DSK=$DEV_DSKDIR/$diskname
DSK=$(echo $DSK | sed -e "s|//|/|g")
log_must parted $DSK -s -- mklabel gpt
blockdev --rereadpt $DSK 2>/dev/null
block_device_wait
else
for i in 0 1 3 4 5 6 7
do
log_must set_partition $i "" 0mb $diskname
done
fi
return 0
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements eg. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
# arguments: <slice_num> <slice_start> <size_plus_units> <whole_disk_name>
function set_partition
{
typeset -i slicenum=$1
typeset start=$2
typeset size=$3
typeset disk=${4#$DEV_DSKDIR/}
disk=${disk#$DEV_RDSKDIR/}
case "$(uname)" in
Linux)
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
disk=$DEV_DSKDIR/$disk
typeset size_mb=${size%%[mMgG]}
size_mb=${size_mb%%[mMgG][bB]}
if [[ ${size:1:1} == 'g' ]]; then
((size_mb = size_mb * 1024))
fi
# Create GPT partition table when setting slice 0 or
# when the device doesn't already contain a GPT label.
parted $disk -s -- print 1 >/dev/null
typeset ret_val=$?
if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
parted $disk -s -- mklabel gpt
if [[ $? -ne 0 ]]; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
# When no start is given align on the first cylinder.
if [[ -z "$start" ]]; then
start=1
fi
# Determine the cylinder size for the device and using
# that calculate the end offset in cylinders.
typeset -i cly_size_kb=0
cly_size_kb=$(parted -m $disk -s -- \
unit cyl print | head -3 | tail -1 | \
awk -F '[:k.]' '{print $4}')
((end = (size_mb * 1024 / cly_size_kb) + start))
parted $disk -s -- \
mkpart part$slicenum ${start}cyl ${end}cyl
typeset ret_val=$?
if [[ $ret_val -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
blockdev --rereadpt $disk 2>/dev/null
block_device_wait $disk
;;
FreeBSD)
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
disk=$DEV_DSKDIR/$disk
if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then
gpart destroy -F $disk >/dev/null 2>&1
gpart create -s GPT $disk
if [[ $? -ne 0 ]]; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
typeset index=$((slicenum + 1))
if [[ -n $start ]]; then
start="-b $start"
fi
gpart add -t freebsd-zfs $start -s $size -i $index $disk
typeset ret_val=$?
if [[ $ret_val -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
block_device_wait $disk
;;
*)
if [[ -z $slicenum || -z $size || -z $disk ]]; then
log_fail "The slice, size or disk name is unspecified."
fi
typeset format_file=/var/tmp/format_in.$$
echo "partition" >$format_file
echo "$slicenum" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "$start" >> $format_file
echo "$size" >> $format_file
echo "label" >> $format_file
echo "" >> $format_file
echo "q" >> $format_file
echo "q" >> $format_file
format -e -s -d $disk -f $format_file
typeset ret_val=$?
rm -f $format_file
;;
esac
if [[ $ret_val -ne 0 ]]; then
log_note "Unable to format $disk slice $slicenum to $size"
return 1
fi
return 0
}
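#
# Example usage (an illustrative sketch): create a single 1 GB slice at the
# default starting offset; $DISK is assumed to hold a bare disk name such
# as "sdb".
#
# log_must set_partition 0 "" 1gb $DISK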
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices which currently can only be used in the test suite as raw/un-partitioned
# devices (ie a zpool cannot be created on a whole mpath device that has partitions)
#
function delete_partitions
{
typeset disk
if [[ -z $DISKSARRAY ]]; then
DISKSARRAY=$DISKS
fi
if is_linux; then
typeset -i part
for disk in $DISKSARRAY; do
for (( part = 1; part < MAX_PARTITIONS; part++ )); do
typeset partition=${disk}${SLICE_PREFIX}${part}
parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1
if lsblk | grep -qF ${partition}; then
log_fail "Partition ${partition} not deleted"
else
log_note "Partition ${partition} deleted"
fi
done
done
elif is_freebsd; then
for disk in $DISKSARRAY; do
if gpart destroy -F $disk; then
log_note "Partitions for ${disk} deleted"
else
log_fail "Partitions for ${disk} not deleted"
fi
done
fi
}
#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
typeset disk=$1
typeset slice=$2
if [[ -z $disk || -z $slice ]] ; then
log_fail "The disk name or slice number is unspecified."
fi
case "$(uname)" in
Linux)
endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
- grep "part${slice}" | \
- awk '{print $3}' | \
- sed 's,cyl,,')
+ awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}')
((endcyl = (endcyl + 1)))
;;
FreeBSD)
disk=${disk#/dev/zvol/}
disk=${disk%p*}
slice=$((slice + 1))
endcyl=$(gpart show $disk | \
awk -v slice=$slice '$3 == slice { print $1 + $2 }')
;;
*)
disk=${disk#/dev/dsk/}
disk=${disk#/dev/rdsk/}
disk=${disk%s*}
typeset -i ratio=0
ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
grep "sectors\/cylinder" | \
awk '{print $2}')
if ((ratio == 0)); then
return
fi
typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
nawk -v token="$slice" '{if ($1==token) print $6}')
((endcyl = (endcyl + 1) / ratio))
;;
esac
echo $endcyl
}
#
# Given a size,disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
typeset -i i=0
typeset slice_size=$1
typeset disk_name=$2
typeset total_slices=$3
typeset cyl
zero_partitions $disk_name
while ((i < $total_slices)); do
if ! is_linux; then
if ((i == 2)); then
((i = i + 1))
continue
fi
fi
log_must set_partition $i "$cyl" $slice_size $disk_name
cyl=$(get_endslice $disk_name $i)
((i = i+1))
done
}
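#
# Example usage (an illustrative sketch): split the first disk in $DISKS
# into four 100 MB slices for tests that need several small vdevs.
#
# partition_disk 100mb ${DISKS%% *} 4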
#
# This function continues to write filenum files into dirnum
# directories until either file_write returns an error or the
# maximum number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
# non 0 on error
#
# Where :
# destdir: is the directory where everything is to be created under
# dirnum: the maximum number of subdirectories to use, -1 no limit
# filenum: the maximum number of files per subdirectory
# bytes: number of bytes to write
# num_writes: number of times to write out bytes
# data: the data that will be written
#
# E.g.
# fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
typeset destdir=${1:-$TESTDIR}
typeset -i dirnum=${2:-50}
typeset -i filenum=${3:-50}
typeset -i bytes=${4:-8192}
typeset -i num_writes=${5:-10240}
typeset data=${6:-0}
mkdir -p $destdir/{1..$dirnum}
for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do
file_write -o create -f $f -b $bytes -c $num_writes -d $data \
|| return $?
done
return 0
}
#
# Simple function to get the specified property. If unable to
# get the property then return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
typeset prop_val
typeset prop=$1
typeset dataset=$2
prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for dataset " \
"$dataset"
return 1
fi
echo "$prop_val"
return 0
}
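#
# Example usage (an illustrative sketch): read the parsable 'used' value of
# the test dataset.
#
# typeset used=$(get_prop used $TESTPOOL/$TESTFS)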
#
# Simple function to get the specified property of a pool. If unable to
# get the property then return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_pool_prop # property pool
{
typeset prop_val
typeset prop=$1
typeset pool=$2
if poolexists $pool ; then
prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
awk '{print $3}')
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for pool " \
"$pool"
return 1
fi
else
log_note "Pool $pool not exists."
return 1
fi
echo "$prop_val"
return 0
}
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
function poolexists
{
typeset pool=$1
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
zpool get name "$pool" > /dev/null 2>&1
return $?
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n dataset name
function datasetexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs get name $1 > /dev/null 2>&1 || \
return $?
shift
done
return 0
}
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
&& return 1
shift
done
return 0
}
function is_shared_freebsd
{
typeset fs=$1
pgrep -q mountd && showmount -E | grep -qx $fs
}
function is_shared_illumos
{
typeset fs=$1
typeset mtpt
for mtpt in `share | awk '{print $2}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
typeset stat=$(svcs -H -o STA nfs/server:default)
if [[ $stat != "ON" ]]; then
log_note "Current nfs/server status: $stat"
fi
return 1
}
function is_shared_linux
{
typeset fs=$1
typeset mtpt
for mtpt in `share | awk '{print $1}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
}
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case $mtpt in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
case $(uname) in
FreeBSD) is_shared_freebsd "$fs" ;;
Linux) is_shared_linux "$fs" ;;
*) is_shared_illumos "$fs" ;;
esac
}
function is_exported_illumos
{
typeset fs=$1
typeset mtpt
for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
}
function is_exported_freebsd
{
typeset fs=$1
typeset mtpt
for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
}
function is_exported_linux
{
typeset fs=$1
typeset mtpt
for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
}
#
# Given a mountpoint, or a dataset name, determine if it is exported via
# the os-specific NFS exports file.
#
# Returns 0 if exported, 1 otherwise.
#
function is_exported
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case $mtpt in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
case $(uname) in
FreeBSD) is_exported_freebsd "$fs" ;;
Linux) is_exported_linux "$fs" ;;
*) is_exported_illumos "$fs" ;;
esac
}
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
typeset fs=$1
typeset mtpt
if datasetnonexists "$fs" ; then
return 1
else
- fs=$(echo $fs | sed 's@/@_@g')
+ fs=$(echo $fs | tr / _)
fi
if is_linux; then
for mtpt in `net usershare list | awk '{print $1}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
else
log_note "Currently unsupported by the test framework"
return 1
fi
}
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
typeset fs=$1
is_shared $fs
if (($? == 0)); then
return 1
fi
return 0
}
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
typeset fs=$1
is_shared_smb $fs
if (($? == 0)); then
return 1
fi
return 0
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
typeset fs=$1
is_shared $fs || is_shared_smb $fs
if (($? == 0)); then
zfs unshare $fs || log_fail "zfs unshare $fs failed"
fi
return 0
}
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
typeset fs=$1
if is_linux; then
is_shared $fs
if (($? != 0)); then
log_must share "*:$fs"
fi
else
is_shared $fs
if (($? != 0)); then
log_must share -F nfs $fs
fi
fi
return 0
}
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
typeset fs=$1
if is_linux; then
is_shared $fs
if (($? == 0)); then
log_must unshare -u "*:$fs"
fi
else
is_shared $fs
if (($? == 0)); then
log_must unshare -F nfs $fs
fi
fi
return 0
}
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
if is_linux; then
share -v
else
share -F nfs
fi
return 0
}
#
# Helper function to show SMB shares.
#
function showshares_smb
{
if is_linux; then
net usershare list
else
share -F smb
fi
return 0
}
function check_nfs
{
if is_linux; then
share -s
elif is_freebsd; then
showmount -e
else
log_unsupported "Unknown platform"
fi
if [[ $? -ne 0 ]]; then
log_unsupported "The NFS utilities are not installed"
fi
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
# Cannot share directory in non-global zone.
#
if ! is_global_zone; then
log_note "Cannot trigger NFS server by sharing in LZ."
return
fi
if is_linux; then
#
# Re-synchronize /var/lib/nfs/etab with /etc/exports and
# /etc/exports.d/* to provide a clean test environment.
#
log_must share -r
log_note "NFS server must be started prior to running ZTS."
return
elif is_freebsd; then
kill -s HUP $(cat /var/run/mountd.pid)
log_note "NFS server must be started prior to running ZTS."
return
fi
typeset nfs_fmri="svc:/network/nfs/server:default"
if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
#
# Only a real sharing operation can bring the NFS server
# online permanently.
#
typeset dummy=/tmp/dummy
if [[ -d $dummy ]]; then
log_must rm -rf $dummy
fi
log_must mkdir $dummy
log_must share $dummy
#
# Wait for the fmri's status to reach its final state. While in
# transition an asterisk (*) is appended to the instance status, and
# unsharing at that point would flip the status back to 'DIS'.
#
# Wait for at least 1 second.
#
log_must sleep 1
timeout=10
while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
do
log_must sleep 1
((timeout -= 1))
done
log_must unshare $dummy
log_must rm -rf $dummy
fi
log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
if is_linux || is_freebsd; then
return 0
else
typeset cur_zone=$(zonename 2>/dev/null)
if [[ $cur_zone != "global" ]]; then
return 1
fi
return 0
fi
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
typeset limit=$1
[[ -z $limit ]] && return 0
if is_global_zone ; then
case $limit in
global|both)
;;
local) log_unsupported "Test is unable to run from "\
"global zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
else
case $limit in
local|both)
;;
global) log_unsupported "Test is unable to run from "\
"local zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
reexport_pool
fi
return 0
}
# Return 0 if created successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
typeset pool=${1%%/*}
shift
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 1
fi
if poolexists $pool ; then
destroy_pool $pool
fi
if is_global_zone ; then
[[ -d /$pool ]] && rm -rf /$pool
log_must zpool create -f $pool $@
fi
return 0
}
# Return 0 if destroyed successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
function destroy_pool #pool
{
typeset pool=${1%%/*}
typeset mtpt
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
if is_global_zone ; then
if poolexists "$pool" ; then
mtpt=$(get_prop mountpoint "$pool")
# At times, syseventd/udev activity can cause attempts
# to destroy a pool to fail with EBUSY. We retry a few
# times allowing failures before requiring the destroy
# to succeed.
log_must_busy zpool destroy -f $pool
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Pool does not exist. ($pool)"
return 1
fi
fi
return 0
}
# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
function create_dataset #dataset dataset_options
{
typeset dataset=$1
shift
if [[ -z $dataset ]]; then
log_note "Missing dataset name."
return 1
fi
if datasetexists $dataset ; then
destroy_dataset $dataset
fi
log_must zfs create $@ $dataset
return 0
}
# Return 0 if destroyed successfully or the dataset exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
function destroy_dataset #dataset #args
{
typeset dataset=$1
typeset mtpt
typeset args=${2:-""}
if [[ -z $dataset ]]; then
log_note "No dataset name given."
return 1
fi
if is_global_zone ; then
if datasetexists "$dataset" ; then
mtpt=$(get_prop mountpoint "$dataset")
log_must_busy zfs destroy $args $dataset
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Dataset does not exist. ($dataset)"
return 1
fi
fi
return 0
}
#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
typeset zone_name=${1:-$(hostname)-z}
typeset zone_root=${2:-"/zone_root"}
typeset zone_ip=${3:-"10.1.1.10"}
typeset prefix_ctr=$ZONE_CTR
typeset pool_name=$ZONE_POOL
typeset -i cntctr=5
typeset -i i=0
# Create pool and 5 container within it
#
[[ -d /$pool_name ]] && rm -rf /$pool_name
log_must zpool create -f $pool_name $DISKS
while ((i < cntctr)); do
log_must zfs create $pool_name/$prefix_ctr$i
((i += 1))
done
# create a zvol
log_must zfs create -V 1g $pool_name/zone_zvol
block_device_wait
#
# If current system support slog, add slog device for pool
#
if verify_slog_support ; then
typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
log_must mkfile $MINVDEVSIZE $sdevs
log_must zpool add $pool_name log mirror $sdevs
fi
# this isn't supported just yet.
# Create a filesystem. In order to add this to
# the zone, it must have its mountpoint set to 'legacy'
# log_must zfs create $pool_name/zfs_filesystem
# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
[[ -d $zone_root ]] && \
log_must rm -rf $zone_root/$zone_name
[[ ! -d $zone_root ]] && \
log_must mkdir -p -m 0700 $zone_root/$zone_name
# Create zone configure file and configure the zone
#
typeset zone_conf=/tmp/zone_conf.$$
echo "create" > $zone_conf
echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
echo "set autoboot=true" >> $zone_conf
i=0
while ((i < cntctr)); do
echo "add dataset" >> $zone_conf
echo "set name=$pool_name/$prefix_ctr$i" >> \
$zone_conf
echo "end" >> $zone_conf
((i += 1))
done
# add our zvol to the zone
echo "add device" >> $zone_conf
echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
echo "end" >> $zone_conf
# add a corresponding zvol rdsk to the zone
echo "add device" >> $zone_conf
echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
echo "end" >> $zone_conf
# once it's supported, we'll add our filesystem to the zone
# echo "add fs" >> $zone_conf
# echo "set type=zfs" >> $zone_conf
# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
# echo "set dir=/export/zfs_filesystem" >> $zone_conf
# echo "end" >> $zone_conf
echo "verify" >> $zone_conf
echo "commit" >> $zone_conf
log_must zonecfg -z $zone_name -f $zone_conf
log_must rm -f $zone_conf
# Install the zone
zoneadm -z $zone_name install
if (($? == 0)); then
log_note "SUCCESS: zoneadm -z $zone_name install"
else
log_fail "FAIL: zoneadm -z $zone_name install"
fi
# Install sysidcfg file
#
typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
echo "system_locale=C" > $sysidcfg
echo "terminal=dtterm" >> $sysidcfg
echo "network_interface=primary {" >> $sysidcfg
echo "hostname=$zone_name" >> $sysidcfg
echo "}" >> $sysidcfg
echo "name_service=NONE" >> $sysidcfg
echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
echo "security_policy=NONE" >> $sysidcfg
echo "timezone=US/Eastern" >> $sysidcfg
# Boot this zone
log_must zoneadm -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
typeset -i cntctr=5
typeset -i i=0
while ((i < cntctr)); do
if ((i == 0)); then
TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
if ! ismounted $TESTPOOL; then
log_must zfs mount $TESTPOOL
fi
else
eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
if eval ! ismounted \$TESTPOOL$i; then
log_must eval zfs mount \$TESTPOOL$i
fi
fi
((i += 1))
done
}
#
# Verify a given disk or pool state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
[[ -z $pool ]] || [[ -z $state ]] \
&& log_fail "Arguments invalid or missing"
if [[ -z $disk ]]; then
#check pool state only
zpool get -H -o value health $pool \
| grep -i "$state" > /dev/null 2>&1
else
zpool status -v $pool | grep "$disk" \
| grep -i "$state" > /dev/null 2>&1
fi
return $?
}
#
# Get the mountpoint of snapshot
# For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if [[ $dataset != *@* ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
typeset fs=${dataset%@*}
typeset snap=${dataset#*@}
if [[ -z $fs || -z $snap ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a device and 'ashift' value verify it's correctly set on every label
#
function verify_ashift # device ashift
{
typeset device="$1"
typeset ashift="$2"
zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
if (ashift != $2)
exit 1;
else
count++;
} END {
if (count != 4)
exit 1;
else
exit 0;
}'
return $?
}
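#
# Example usage (an illustrative sketch; the file vdev name is hypothetical):
# create a pool with ashift=12 on a file vdev and confirm the value was
# written to all four labels.
#
# log_must mkfile $MINVDEVSIZE $TEST_BASE_DIR/vdev.img
# log_must zpool create -o ashift=12 $TESTPOOL $TEST_BASE_DIR/vdev.img
# log_must verify_ashift $TEST_BASE_DIR/vdev.img 12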
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
typeset pool="$1"
typeset filesys="$2"
typeset zdbout="/tmp/zdbout.$$"
shift
shift
typeset dirs=$@
typeset search_path=""
log_note "Calling zdb to verify filesystem '$filesys'"
zfs unmount -a > /dev/null 2>&1
log_must zpool export $pool
if [[ -n $dirs ]] ; then
for dir in $dirs ; do
search_path="$search_path -d $dir"
done
fi
log_must zpool import $search_path $pool
zdb -cudi $filesys > $zdbout 2>&1
if [[ $? != 0 ]]; then
log_note "Output: zdb -cudi $filesys"
cat $zdbout
log_fail "zdb detected errors with: '$filesys'"
fi
log_must zfs mount -a
log_must rm -rf $zdbout
}
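#
# Example usage (an illustrative sketch): after a test has written data,
# export/import the pool and let zdb verify the test filesystem; the extra
# argument names a directory searched for file-backed vdevs.
#
# verify_filesys $TESTPOOL $TESTPOOL/$TESTFS $TEST_BASE_DIR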
#
# Given a pool issue a scrub and verify that no checksum errors are reported.
#
function verify_pool
{
typeset pool=${1:-$TESTPOOL}
log_must zpool scrub $pool
log_must wait_scrubbed $pool
typeset -i cksum=$(zpool status $pool | awk '
!NF { isvdev = 0 }
isvdev { errors += $NF }
/CKSUM$/ { isvdev = 1 }
END { print errors }
')
if [[ $cksum != 0 ]]; then
log_must zpool status -v
log_fail "Unexpected CKSUM errors found on $pool ($cksum)"
fi
}
#
# Given a pool, this function lists all disks in the pool
#
function get_disklist # pool
{
typeset disklist=""
disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
grep -v "\-\-\-\-\-" | \
- egrep -v -e "^(mirror|raidz[1-3]|spare|log|cache|special|dedup)$")
+ egrep -v -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$")
echo $disklist
}
#
# Given a pool, this function lists all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
args="-P $1"
get_disklist $args
}
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
typeset -i TIMEOUT=$1
shift
typeset cpids="$@"
log_note "Waiting for child processes($cpids). " \
"It could last dozens of minutes, please be patient ..."
log_must sleep $TIMEOUT
log_note "Killing child processes after ${TIMEOUT} stress timeout."
typeset pid
for pid in $cpids; do
ps -p $pid > /dev/null 2>&1
if (($? == 0)); then
log_must kill -USR1 $pid
fi
done
}
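#
# Example usage (an illustrative sketch; "write_loop" is a hypothetical
# background writer, not a helper defined in this library): run two writers
# for ten minutes and then terminate them.
#
# write_loop $TESTDIR/a & typeset pid1=$!
# write_loop $TESTDIR/b & typeset pid2=$!
# stress_timeout 600 $pid1 $pid2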
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "spares")
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_hotspare_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
#
# Verify a given slog disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "logs")
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Verify a given vdev disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk)
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#*$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_vdev_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
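#
# Example usage (an illustrative sketch): after onlining a previously
# offlined disk, give it up to 60 seconds to report ONLINE.
#
# log_must zpool online $TESTPOOL $DISK
# log_must wait_vdev_state $TESTPOOL $DISK "ONLINE" 60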
#
# Check the output of 'zpool status -v <pool>' to see if the content of
# <token> contains the <keyword> specified.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
typeset pool=$1
typeset token=$2
typeset keyword=$3
typeset verbose=${4:-false}
scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
($1==token) {print $0}')
if [[ $verbose == true ]]; then
log_note $scan
fi
echo $scan | egrep -i "$keyword" > /dev/null 2>&1
return $?
}
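# Illustrative sketch (commented out): check_pool_status greps one token line
# of 'zpool status -v'; for example, verify that the errors line reports no
# known data errors, printing the matched line for debugging.
#
#	log_must check_pool_status $TESTPOOL "errors" "No known data errors" true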
#
# The following functions are instances of check_pool_status():
# is_pool_resilvering - to check if the pool resilver is in progress
# is_pool_resilvered - to check if the pool resilver is completed
# is_pool_scrubbing - to check if the pool scrub is in progress
# is_pool_scrubbed - to check if the pool scrub is completed
# is_pool_scrub_stopped - to check if the pool scrub is stopped
# is_pool_scrub_paused - to check if the pool scrub has paused
# is_pool_removing - to check if the pool is removing a vdev
# is_pool_removed - to check if the pool remove is completed
# is_pool_discarding - to check if the pool checkpoint is being discarded
#
function is_pool_resilvering #pool <verbose>
{
check_pool_status "$1" "scan" \
"resilver[ ()0-9A-Za-z:_-]* in progress since" $2
return $?
}
function is_pool_resilvered #pool <verbose>
{
check_pool_status "$1" "scan" "resilvered " $2
return $?
}
function is_pool_scrubbing #pool <verbose>
{
check_pool_status "$1" "scan" "scrub in progress since " $2
return $?
}
function is_pool_scrubbed #pool <verbose>
{
check_pool_status "$1" "scan" "scrub repaired" $2
return $?
}
function is_pool_scrub_stopped #pool <verbose>
{
check_pool_status "$1" "scan" "scrub canceled" $2
return $?
}
function is_pool_scrub_paused #pool <verbose>
{
check_pool_status "$1" "scan" "scrub paused since " $2
return $?
}
function is_pool_removing #pool
{
check_pool_status "$1" "remove" "in progress since "
return $?
}
function is_pool_removed #pool
{
check_pool_status "$1" "remove" "completed on"
return $?
}
function is_pool_discarding #pool
{
check_pool_status "$1" "checkpoint" "discarding"
return $?
}
function wait_for_degraded
{
typeset pool=$1
typeset timeout=${2:-30}
typeset t0=$SECONDS
while :; do
[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
log_note "$pool is not yet degraded."
sleep 1
if ((SECONDS - t0 > $timeout)); then
log_note "$pool not degraded after $timeout seconds."
return 1
fi
done
return 0
}
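# Illustrative sketch (commented out): take one disk offline (placeholder
# $DISK1) and wait up to 60 seconds for the pool to report DEGRADED.
#
#	log_must zpool offline $TESTPOOL $DISK1
#	log_must wait_for_degraded $TESTPOOL 60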
#
# Use create_pool()/destroy_pool() to clean up the information in
# the given disk to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
typeset pool="foopool$$"
for vdev in $@; do
zero_partitions $vdev
done
poolexists $pool && destroy_pool $pool
create_pool $pool $@
destroy_pool $pool
return 0
}
#/**
# A function to find and locate free disks on a system, or from the
# disks given as parameters. It works by filtering out disks that are
# in use as swap or dump devices, as well as disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
# Trust provided list, no attempt is made to locate unused devices.
if is_linux || is_freebsd; then
echo "$@"
return
fi
sfi=/tmp/swaplist.$$
dmpi=/tmp/dumpdev.$$
max_finddisksnum=${MAX_FINDDISKSNUM:-6}
swap -l > $sfi
dumpadm > $dmpi 2>/dev/null
# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
BEGIN { FS="."; }
/^Specify disk/{
searchdisks=0;
}
{
if (searchdisks && \$2 !~ "^$"){
split(\$2,arr," ");
print arr[1];
}
}
/^AVAILABLE DISK SELECTIONS:/{
searchdisks=1;
}
EOF
#---------------------
chmod 755 /tmp/find_disks.awk
disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
rm /tmp/find_disks.awk
unused=""
for disk in $disks; do
# Check for mounted
grep "${disk}[sp]" /etc/mnttab >/dev/null
(($? == 0)) && continue
# Check for swap
grep "${disk}[sp]" $sfi >/dev/null
(($? == 0)) && continue
# check for dump device
grep "${disk}[sp]" $dmpi >/dev/null
(($? == 0)) && continue
# check to see if this disk hasn't been explicitly excluded
# by a user-set environment variable
echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
(($? == 0)) && continue
unused_candidates="$unused_candidates $disk"
done
rm $sfi
rm $dmpi
# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. limit the number to max_finddisksnum
count=0
for disk in $unused_candidates; do
if is_disk_device $DEV_DSKDIR/${disk}s0 && \
[ $count -lt $max_finddisksnum ]; then
unused="$unused $disk"
# do not impose limit if $@ is provided
[[ -z $@ ]] && ((count = count + 1))
fi
done
# finally, return our disk list
echo $unused
}
function add_user_freebsd #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
# Check to see if the user exists.
if id $user > /dev/null 2>&1; then
return 0
fi
# Assign 1000 as the base uid
typeset -i uid=1000
while true; do
typeset -i ret
pw useradd -u $uid -g $group -d $basedir/$user -m -n $user
ret=$?
case $ret in
0) break ;;
# The uid is not unique
65) ((uid += 1)) ;;
*) return 1 ;;
esac
if [[ $uid == 65000 ]]; then
log_fail "No user id available under 65000 for $user"
fi
done
# Silence MOTD
touch $basedir/$user/.hushlogin
return 0
}
#
# Delete the specified user.
#
# $1 login name
#
function del_user_freebsd #<logname>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must pw userdel $user
fi
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group_freebsd #<group_name>
{
typeset group=$1
# See if the group already exists.
if pw groupshow $group >/dev/null 2>&1; then
return 0
fi
# Assign 1000 as the base gid
typeset -i gid=1000
while true; do
pw groupadd -g $gid -n $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
# The gid is not unique
65) ((gid += 1)) ;;
*) return 1 ;;
esac
if [[ $gid == 65000 ]]; then
log_fail "No user id available under 65000 for $group"
fi
done
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group_freebsd #<group_name>
{
typeset group=$1
pw groupdel -n $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist, or was deleted successfully.
0|6|65) return 0 ;;
# Name already exists as a group name
9) log_must pw groupdel $group ;;
*) return 1 ;;
esac
return 0
}
function add_user_illumos #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
log_must useradd -g $group -d $basedir/$user -m $user
return 0
}
function del_user_illumos #<user_name>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 6 userdel $user
fi
return 0
}
function add_group_illumos #<group_name>
{
typeset group=$1
typeset -i gid=100
while true; do
groupadd -g $gid $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
# The gid is not unique
4) ((gid += 1)) ;;
*) return 1 ;;
esac
done
}
function del_group_illumos #<group_name>
{
typeset group=$1
groupmod -n $group $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
6) return 0 ;;
# Name already exists as a group name
9) log_must groupdel $group ;;
*) return 1 ;;
esac
}
function add_user_linux #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=$3
log_must useradd -g $group -d $basedir/$user -m $user
# Add new users to the same group as the command line utils.
# This allows them to be run out of the original user's home
# directory as long as it is permissioned to be group readable.
cmd_group=$(stat --format="%G" $(which zfs))
log_must usermod -a -G $cmd_group $user
return 0
}
function del_user_linux #<user_name>
{
typeset user=$1
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 6 userdel $user
fi
return 0
}
function add_group_linux #<group_name>
{
typeset group=$1
# Assign 100 as the base gid, a larger value is selected for
# Linux because for many distributions 1000 and under are reserved.
while true; do
groupadd $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
*) return 1 ;;
esac
done
}
function del_group_linux #<group_name>
{
typeset group=$1
getent group $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
2) return 0 ;;
# Name already exists as a group name
0) log_must groupdel $group ;;
*) return 1 ;;
esac
return 0
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
typeset group=$1
typeset user=$2
typeset basedir=${3:-"/var/tmp"}
if ((${#group} == 0 || ${#user} == 0)); then
log_fail "group name or user name are not defined."
fi
case $(uname) in
FreeBSD)
add_user_freebsd "$group" "$user" "$basedir"
;;
Linux)
add_user_linux "$group" "$user" "$basedir"
;;
*)
add_user_illumos "$group" "$user" "$basedir"
;;
esac
return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
typeset user=$1
typeset basedir=${2:-"/var/tmp"}
if ((${#user} == 0)); then
log_fail "login name is necessary."
fi
case $(uname) in
FreeBSD)
del_user_freebsd "$user"
;;
Linux)
del_user_linux "$user"
;;
*)
del_user_illumos "$user"
;;
esac
[[ -d $basedir/$user ]] && rm -fr $basedir/$user
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
case $(uname) in
FreeBSD)
add_group_freebsd "$group"
;;
Linux)
add_group_linux "$group"
;;
*)
add_group_illumos "$group"
;;
esac
return 0
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
case $(uname) in
FreeBSD)
del_group_freebsd "$group"
;;
Linux)
del_group_linux "$group"
;;
*)
del_group_illumos "$group"
;;
esac
return 0
}
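# Illustrative sketch (commented out): typical lifecycle of the wrappers
# above in a delegation-style test; the group and user names are placeholders.
#
#	log_must add_group zfsgrp
#	log_must add_user zfsgrp zfsuser
#	log_must user_run zfsuser zfs list
#	log_must del_user zfsuser
#	log_must del_group zfsgrp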
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name
typeset pool=""
typeset DONT_DESTROY=""
# We check that by deleting the $1 pool, we're not
# going to pull the rug out from other pools. Do this
# by looking at all other pools, ensuring that they
# aren't built from files or zvols contained in this pool.
for pool in $(zpool list -H -o name)
do
ALTMOUNTPOOL=""
# this is a list of the top-level directories in each of the
# files that make up the path to the files the pool is based on
FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
awk '{print $1}')
# this is a list of the zvols that make up the pool
ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
| awk '{print $1}')
# also want to determine if it's a file-based pool using an
# alternate mountpoint...
POOL_FILE_DIRS=$(zpool status -v $pool | \
grep / | awk '{print $1}' | \
awk -F/ '{print $2}' | grep -v "dev")
for pooldir in $POOL_FILE_DIRS
do
OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
grep "${pooldir}$" | awk '{print $1}')
ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
done
if [ ! -z "$ZVOLPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ZVOLPOOL on $1"
fi
if [ ! -z "$FILEPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $FILEPOOL on $1"
fi
if [ ! -z "$ALTMOUNTPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
fi
done
if [ -z "${DONT_DESTROY}" ]
then
return 0
else
log_note "Warning: it is not safe to destroy $1!"
return 1
fi
}
#
# Verify zfs operation with -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
typeset ops=$1
typeset datatype=$2
typeset dataset=$3
typeset newdataset=$4
if [[ $datatype != "fs" && $datatype != "vol" ]]; then
log_fail "$datatype is not supported."
fi
# check parameters accordingly
case $ops in
create)
newdataset=$dataset
dataset=""
if [[ $datatype == "vol" ]]; then
ops="create -V $VOLSIZE"
fi
;;
clone)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
log_must snapexists $dataset
;;
rename)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
;;
*)
log_fail "$ops is not supported."
;;
esac
# make sure the upper level filesystem does not exist
destroy_dataset "${newdataset%/*}" "-rRf"
# without -p option, operation will fail
log_mustnot zfs $ops $dataset $newdataset
log_mustnot datasetexists $newdataset ${newdataset%/*}
# with -p option, operation should succeed
log_must zfs $ops -p $dataset $newdataset
block_device_wait
if ! datasetexists $newdataset ; then
log_fail "-p option does not work for $ops"
fi
# when $ops is create or clone, redo the operation still return zero
if [[ $ops != "rename" ]]; then
log_must zfs $ops -p $dataset $newdataset
fi
return 0
}
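# Illustrative sketch (commented out): exercise 'zfs create -p' and
# 'zfs rename -p' through verify_opt_p_ops with nested dataset names.
#
#	log_must verify_opt_p_ops "create" "fs" $TESTPOOL/dir1/dir2/$TESTFS
#	log_must verify_opt_p_ops "rename" "fs" $TESTPOOL/$TESTFS \
#	    $TESTPOOL/dir3/dir4/$TESTFS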
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
typeset pool=$1
typeset config=$2
typeset alt_root
if ! poolexists "$pool" ; then
return 1
fi
alt_root=$(zpool list -H $pool | awk '{print $NF}')
if [[ $alt_root == "-" ]]; then
value=$(zdb -C $pool | grep "$config:" | awk -F: \
'{print $2}')
else
value=$(zdb -e $pool | grep "$config:" | awk -F: \
'{print $2}')
fi
if [[ -n $value ]] ; then
value=${value#'}
value=${value%'}
fi
echo $value
return 0
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
typeset cnt=$1
shift
typeset str="$@"
typeset -i ind
((ind = RANDOM % cnt + 1))
typeset ret=$(echo "$str" | cut -f $ind -d ' ')
echo $ret
}
#
# Randomly select one item from the arguments, including the NONE string
#
function random_get_with_non
{
typeset -i cnt=$#
((cnt += 1))
_random_get "$cnt" "$@"
}
#
# Randomly select one item from the arguments, excluding the NONE string
#
function random_get
{
_random_get "$#" "$@"
}
#
# Detect if the current system supports slog
#
function verify_slog_support
{
typeset dir=$TEST_BASE_DIR/disk.$$
typeset pool=foo.$$
typeset vdev=$dir/a
typeset sdev=$dir/b
mkdir -p $dir
mkfile $MINVDEVSIZE $vdev $sdev
typeset -i ret=0
if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
ret=1
fi
rm -r $dir
return $ret
}
#
# The function will generate a dataset name with a specific length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
typeset -i len=$1
typeset basestr="$2"
typeset -i baselen=${#basestr}
typeset -i iter=0
typeset l_name=""
if ((len % baselen == 0)); then
((iter = len / baselen))
else
((iter = len / baselen + 1))
fi
while ((iter > 0)); do
l_name="${l_name}$basestr"
((iter -= 1))
done
echo $l_name
}
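# Illustrative sketch (commented out): build a 200-character dataset name by
# repeating a base string, then use it to test name-length limits.
#
#	typeset longname=$(gen_dataset_name 200 "abcdefgh")
#	log_must zfs create $TESTPOOL/$longname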
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
typeset cksum
sync
cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
| awk -F= '{print $7}')
echo $cksum
}
#
# Get cksum of file
# $1 file path
#
function checksum
{
typeset cksum
cksum=$(cksum $1 | awk '{print $1}')
echo $cksum
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset field=${3:-$pool}
state=$(zpool status -v "$pool" 2>/dev/null | \
nawk -v device=$disk -v pool=$pool -v field=$field \
'BEGIN {startconfig=0; startfield=0; }
/config:/ {startconfig=1}
(startconfig==1) && ($1==field) {startfield=1; next;}
(startfield==1) && ($1==device) {print $2; exit;}
(startfield==1) &&
($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
echo $state
}
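# Illustrative sketch (commented out): get_device_state parses the "config:"
# section of 'zpool status -v', so the same helper covers plain vdevs, spares
# and log devices. $DISK1 and $DISK2 are placeholders.
#
#	state=$(get_device_state $TESTPOOL $DISK1)            # e.g. "ONLINE"
#	spare_state=$(get_device_state $TESTPOOL $DISK2 "spares")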
#
# Print the filesystem type of the given directory
#
# $1 directory name
#
function get_fstype
{
typeset dir=$1
if [[ -z $dir ]]; then
log_fail "Usage: get_fstype <directory>"
fi
#
# $ df -n /
# / : ufs
#
df -n $dir | awk '{print $3}'
}
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
typeset disk=$1
if [[ -z $disk ]]; then
log_fail "The disk name is unspecified."
fi
typeset label_file=/var/tmp/labelvtoc.$$
typeset arch=$(uname -p)
if is_linux || is_freebsd; then
log_note "Currently unsupported by the test framework"
return 1
fi
if [[ $arch == "i386" ]]; then
echo "label" > $label_file
echo "0" >> $label_file
echo "" >> $label_file
echo "q" >> $label_file
echo "q" >> $label_file
fdisk -B $disk >/dev/null 2>&1
# wait a while for fdisk to finish
sleep 60
elif [[ $arch == "sparc" ]]; then
echo "label" > $label_file
echo "0" >> $label_file
echo "" >> $label_file
echo "" >> $label_file
echo "" >> $label_file
echo "q" >> $label_file
else
log_fail "unknown arch type"
fi
format -e -s -d $disk -f $label_file
typeset -i ret_val=$?
rm -f $label_file
#
# wait for format to finish
#
sleep 60
if ((ret_val != 0)); then
log_fail "unable to label $disk as VTOC."
fi
return 0
}
#
# check if the system was installed as zfsroot or not
# return: 0 if zfsroot, non-zero if not
#
function is_zfsroot
{
df -n / | grep zfs > /dev/null 2>&1
return $?
}
#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
typeset rootfs=""
if is_freebsd; then
rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
elif ! is_linux; then
rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootfs"
fi
zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
echo $rootfs
else
log_fail "This is not a zfsroot system."
fi
}
#
# get the rootfs's pool name
# return:
# rootpool name
#
function get_rootpool
{
typeset rootfs=""
typeset rootpool=""
if is_freebsd; then
rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
elif ! is_linux; then
rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootpool"
fi
zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
echo ${rootfs%%/*}
else
log_fail "This is not a zfsroot system."
fi
}
#
# Get the number of words in a whitespace-separated string
#
function get_word_count
{
echo $1 | wc -w
}
#
# Verify that the required number of disks is given
#
function verify_disk_count
{
typeset -i min=${2:-1}
typeset -i count=$(get_word_count "$1")
if ((count < min)); then
log_untested "A minimum of $min disks is required to run." \
" You specified $count disk(s)"
fi
}
function ds_is_volume
{
typeset type=$(get_prop type $1)
[[ $type = "volume" ]] && return 0
return 1
}
function ds_is_filesystem
{
typeset type=$(get_prop type $1)
[[ $type = "filesystem" ]] && return 0
return 1
}
function ds_is_snapshot
{
typeset type=$(get_prop type $1)
[[ $type = "snapshot" ]] && return 0
return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
svcs -H -o state labeld 2>/dev/null | grep "enabled"
if (($? != 0)); then
return 1
else
return 0
fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
if is_linux; then
(($(nproc) > 1))
elif is_freebsd; then
(($(sysctl -n kern.smp.cpus) > 1))
else
(($(psrinfo | wc -l) > 1))
fi
return $?
}
function get_cpu_freq
{
if is_linux; then
lscpu | awk '/CPU MHz/ { print $3 }'
elif is_freebsd; then
sysctl -n hw.clockrate
else
psrinfo -v 0 | awk '/processor operates at/ {print $6}'
fi
}
# Run the given command as the user provided.
function user_run
{
typeset user=$1
shift
log_note "user: $user"
log_note "cmd: $*"
typeset out=$TEST_BASE_DIR/out
typeset err=$TEST_BASE_DIR/err
sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err
typeset res=$?
log_note "out: $(<$out)"
log_note "err: $(<$err)"
return $res
}
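# Illustrative sketch (commented out): run an unprivileged command via
# user_run and fail the test if it does not succeed; "zfsuser" is a
# placeholder created earlier with add_user.
#
#	log_must user_run zfsuser zfs list $TESTPOOL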
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
typeset pool=$1
typeset vdev
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 2
fi
shift
# We could use 'zpool list' to only get the vdevs of the pool but we
# can't reference a mirror/raidz vdev using its ID (i.e mirror-0),
# therefore we use the 'zpool status' output.
typeset tmpfile=$(mktemp)
zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile
for vdev in $@; do
grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
[[ $? -ne 0 ]] && return 1
done
rm -f $tmpfile
return 0;
}
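# Illustrative sketch (commented out): confirm that the disks used to build
# the pool really ended up in it. $DISK1 and $DISK2 are placeholders.
#
#	log_must zpool create $TESTPOOL mirror $DISK1 $DISK2
#	log_must vdevs_in_pool $TESTPOOL $DISK1 $DISK2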
function get_max
{
typeset -l i max=$1
shift
for i in "$@"; do
max=$((max > i ? max : i))
done
echo $max
}
function get_min
{
typeset -l i min=$1
shift
for i in "$@"; do
min=$((min < i ? min : i))
done
echo $min
}
# Write data that can be compressed into a directory
function write_compressible
{
typeset dir=$1
typeset megs=$2
typeset nfiles=${3:-1}
typeset bs=${4:-1024k}
typeset fname=${5:-file}
[[ -d $dir ]] || log_fail "No directory: $dir"
# Under Linux fio is not currently used since its behavior can
# differ significantly across versions. This includes missing
# command line options and cases where the --buffer_compress_*
# options fail to behave as expected.
if is_linux; then
typeset file_bytes=$(to_bytes $megs)
typeset bs_bytes=4096
typeset blocks=$(($file_bytes / $bs_bytes))
for (( i = 0; i < $nfiles; i++ )); do
truncate -s $file_bytes $dir/$fname.$i
# Write every third block to get 66% compression.
for (( j = 0; j < $blocks; j += 3 )); do
dd if=/dev/urandom of=$dir/$fname.$i \
seek=$j bs=$bs_bytes count=1 \
conv=notrunc >/dev/null 2>&1
done
done
else
log_must eval "fio \
--name=job \
--fallocate=0 \
--minimal \
--randrepeat=0 \
--buffer_compress_percentage=66 \
--buffer_compress_chunk=4096 \
--directory=$dir \
--numjobs=$nfiles \
--nrfiles=$nfiles \
--rw=write \
--bs=$bs \
--filesize=$megs \
--filename_format='$fname.\$jobnum' >/dev/null"
fi
}
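# Illustrative sketch (commented out): create two 64 MiB files of roughly
# 66%-compressible data in the test dataset's mountpoint.
#
#	typeset mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
#	write_compressible $mntpnt 64m 2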
function get_objnum
{
typeset pathname=$1
typeset objnum
[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
if is_freebsd; then
objnum=$(stat -f "%i" $pathname)
else
objnum=$(stat -c %i $pathname)
fi
echo $objnum
}
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
typeset pool=${1:-$TESTPOOL}
typeset force=${2:-false}
if [[ $force == true ]]; then
log_must zpool sync -f $pool
else
log_must zpool sync $pool
fi
return 0
}
#
# Wait for the zpool 'freeing' property to drop to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
log_must sleep 1
done
}
#
# Wait for every device replace operation to complete
#
# $1 pool name
#
function wait_replacing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "" == "$(zpool status $pool |
awk '/replacing-[0-9]+/ {print $1}')" ]] && break
log_must sleep 1
done
}
#
# Wait for a pool to be scrubbed
#
# $1 pool name
#
function wait_scrubbed
{
typeset pool=${1:-$TESTPOOL}
while ! is_pool_scrubbed $pool ; do
sleep 1
done
}
# Backup the zed.rc in our test directory so that we can edit it for our test.
#
# Returns: Backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
zedrc_backup="$(mktemp)"
cp $ZEDLET_DIR/zed.rc $zedrc_backup
echo $zedrc_backup
}
function zed_rc_restore
{
mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
function zed_setup
{
if ! is_linux; then
log_unsupported "No zed on $(uname)"
fi
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
if [[ ! -e $VDEVID_CONF ]]; then
log_must touch $VDEVID_CONF
fi
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
fi
EXTRA_ZEDLETS=$@
# Create a symlink for /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
# Setup minimal ZED configuration. Individual test cases should
# add additional ZEDLETs as needed for their specific test.
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
# Scripts must only be user writable.
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
saved_umask=$(umask)
log_must umask 0022
for i in $EXTRA_ZEDLETS ; do
log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
done
log_must umask $saved_umask
fi
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
if ! is_linux; then
return
fi
EXTRA_ZEDLETS=$@
log_must rm -f ${ZEDLET_DIR}/zed.rc
log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
log_must rm -f ${ZEDLET_DIR}/all-debug.sh
log_must rm -f ${ZEDLET_DIR}/state
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
for i in $EXTRA_ZEDLETS ; do
log_must rm -f ${ZEDLET_DIR}/$i
done
fi
log_must rm -f $ZED_LOG
log_must rm -f $ZED_DEBUG_LOG
log_must rm -f $VDEVID_CONF_ETC
log_must rm -f $VDEVID_CONF
rmdir $ZEDLET_DIR
}
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
if ! is_linux; then
return
fi
# ZEDLET_DIR=/var/tmp/zed
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
# Verify the ZED is not already running.
pgrep -x zed > /dev/null
if (($? == 0)); then
log_note "ZED already running"
else
log_note "Starting ZED"
# run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \
"-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &"
fi
return 0
}
#
# Kill ZED process
#
function zed_stop
{
if ! is_linux; then
return
fi
log_note "Stopping ZED"
while true; do
zedpids="$(pgrep -x zed)"
[ "$?" -ne 0 ] && break
log_must kill $zedpids
sleep 1
done
return 0
}
#
# Drain all zevents
#
function zed_events_drain
{
while [ $(zpool events -H | wc -l) -ne 0 ]; do
sleep 1
zpool events -c >/dev/null
done
}
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable
# $2 value
function zed_rc_set
{
var="$1"
val="$2"
# Remove the line
cmd="'/$var/d'"
eval sed -i $cmd $ZEDLET_DIR/zed.rc
# Add it at the end
echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
#
# Check if the provided device is actively being used as a swap device.
#
function is_swap_inuse
{
typeset device=$1
if [[ -z $device ]] ; then
log_note "No device specified."
return 1
fi
if is_linux; then
swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
elif is_freebsd; then
swapctl -l | grep -w $device
else
swap -l | grep -w $device > /dev/null 2>&1
fi
return $?
}
#
# Setup a swap device using the provided device.
#
function swap_setup
{
typeset swapdev=$1
if is_linux; then
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
elif is_freebsd; then
log_must swapctl -a $swapdev
else
log_must swap -a $swapdev
fi
return 0
}
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
typeset swapdev=$1
if is_swap_inuse $swapdev; then
if is_linux; then
log_must swapoff $swapdev
elif is_freebsd; then
log_must swapoff $swapdev
else
log_must swap -d $swapdev
fi
fi
return 0
}
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
function set_tunable64
{
set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
function set_tunable32
{
set_tunable_impl "$1" "$2" W
}
function set_tunable_impl
{
typeset name="$1"
typeset value="$2"
typeset mdb_cmd="$3"
typeset module="${4:-zfs}"
eval "typeset tunable=\$$name"
case "$tunable" in
UNSUPPORTED)
log_unsupported "Tunable '$name' is unsupported on $(uname)"
;;
"")
log_fail "Tunable '$name' must be added to tunables.cfg"
;;
*)
;;
esac
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
case "$(uname)" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
[[ -w "$zfs_tunables/$tunable" ]] || return 1
cat >"$zfs_tunables/$tunable" <<<"$value"
return $?
;;
FreeBSD)
sysctl vfs.zfs.$tunable=$value
return "$?"
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
return $?
;;
esac
}
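# Illustrative sketch (commented out): tighten a tunable for one test and
# restore the saved value on exit. TXG_TIMEOUT is assumed to be a NAME
# defined in tunables.cfg.
#
#	typeset saved=$(get_tunable TXG_TIMEOUT)
#	log_must set_tunable64 TXG_TIMEOUT 1
#	# ... run the test ...
#	log_must set_tunable64 TXG_TIMEOUT $saved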
#
# Get a global system tunable
#
# $1 tunable name (use a NAME defined in tunables.cfg)
#
function get_tunable
{
get_tunable_impl "$1"
}
function get_tunable_impl
{
typeset name="$1"
typeset module="${2:-zfs}"
eval "typeset tunable=\$$name"
case "$tunable" in
UNSUPPORTED)
log_unsupported "Tunable '$name' is unsupported on $(uname)"
;;
"")
log_fail "Tunable '$name' must be added to tunables.cfg"
;;
*)
;;
esac
case "$(uname)" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
[[ -f "$zfs_tunables/$tunable" ]] || return 1
cat $zfs_tunables/$tunable
return $?
;;
FreeBSD)
sysctl -n vfs.zfs.$tunable
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
;;
esac
return 1
}
#
# Prints the current time in seconds since UNIX Epoch.
#
function current_epoch
{
printf '%(%s)T'
}
#
# Get decimal value of global uint32_t variable using mdb.
#
function mdb_get_uint32
{
typeset variable=$1
typeset value
value=$(mdb -k -e "$variable/X | ::eval .=U")
if [[ $? -ne 0 ]]; then
log_fail "Failed to get value of '$variable' from mdb."
return 1
fi
echo $value
return 0
}
#
# Set global uint32_t variable to a decimal value using mdb.
#
function mdb_set_uint32
{
typeset variable=$1
typeset value=$2
mdb -kw -e "$variable/W 0t$value" > /dev/null
if [[ $? -ne 0 ]]; then
echo "Failed to set '$variable' to '$value' in mdb."
return 1
fi
return 0
}
#
# Set global scalar integer variable to a hex value using mdb.
# Note: Target should have CTF data loaded.
#
function mdb_ctf_set_int
{
typeset variable=$1
typeset value=$2
mdb -kw -e "$variable/z $value" > /dev/null
if [[ $? -ne 0 ]]; then
echo "Failed to set '$variable' to '$value' in mdb."
return 1
fi
return 0
}
#
# Compute MD5 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function md5digest
{
typeset file=$1
case $(uname) in
FreeBSD)
md5 -q $file
;;
*)
md5sum -b $file | awk '{ print $1 }'
;;
esac
}
#
# Compute SHA256 digest for given file or stdin if no file given.
# Note: file path must not contain spaces
#
function sha256digest
{
typeset file=$1
case $(uname) in
FreeBSD)
sha256 -q $file
;;
*)
sha256sum -b $file | awk '{ print $1 }'
;;
esac
}
function new_fs #<args>
{
case $(uname) in
FreeBSD)
newfs "$@"
;;
*)
echo y | newfs -v "$@"
;;
esac
}
function stat_size #<path>
{
typeset path=$1
case $(uname) in
FreeBSD)
stat -f %z "$path"
;;
*)
stat -c %s "$path"
;;
esac
}
function stat_ctime #<path>
{
typeset path=$1
case $(uname) in
FreeBSD)
stat -f %c "$path"
;;
*)
stat -c %Z "$path"
;;
esac
}
function stat_crtime #<path>
{
typeset path=$1
case $(uname) in
FreeBSD)
stat -f %B "$path"
;;
*)
stat -c %W "$path"
;;
esac
}
# Run a command as if it was being run in a TTY.
#
# Usage:
#
# faketty command
#
function faketty
{
if is_freebsd; then
script -q /dev/null env "$@"
else
script --return --quiet -c "$*" /dev/null
fi
}
#
# Produce a random permutation of the integers in a given range (inclusive).
#
function range_shuffle # begin end
{
typeset -i begin=$1
typeset -i end=$2
seq ${begin} ${end} | sort -R
}
#
# Cross-platform xattr helpers
#
function get_xattr # name path
{
typeset name=$1
typeset path=$2
case $(uname) in
FreeBSD)
getextattr -qq user "${name}" "${path}"
;;
*)
attr -qg "${name}" "${path}"
;;
esac
}
function set_xattr # name value path
{
typeset name=$1
typeset value=$2
typeset path=$3
case $(uname) in
FreeBSD)
setextattr user "${name}" "${value}" "${path}"
;;
*)
attr -qs "${name}" -V "${value}" "${path}"
;;
esac
}
function set_xattr_stdin # name value
{
typeset name=$1
typeset path=$2
case $(uname) in
FreeBSD)
setextattr -i user "${name}" "${path}"
;;
*)
attr -qs "${name}" "${path}"
;;
esac
}
function rm_xattr # name path
{
typeset name=$1
typeset path=$2
case $(uname) in
FreeBSD)
rmextattr -q user "${name}" "${path}"
;;
*)
attr -qr "${name}" "${path}"
;;
esac
}
function ls_xattr # path
{
typeset path=$1
case $(uname) in
FreeBSD)
lsextattr -qq user "${path}"
;;
*)
attr -ql "${path}"
;;
esac
}
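# Illustrative sketch (commented out): round-trip an extended attribute with
# the cross-platform helpers above; $TESTDIR/testfile is a placeholder path.
#
#	log_must set_xattr testattr testvalue $TESTDIR/testfile
#	log_must eval "get_xattr testattr $TESTDIR/testfile | grep -q testvalue"
#	log_must rm_xattr testattr $TESTDIR/testfile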
function kstat # stat flags?
{
typeset stat=$1
typeset flags=${2-"-n"}
case $(uname) in
FreeBSD)
sysctl $flags kstat.zfs.misc.$stat
;;
Linux)
typeset zfs_kstat="/proc/spl/kstat/zfs/$stat"
[[ -f "$zfs_kstat" ]] || return 1
cat $zfs_kstat
;;
*)
false
;;
esac
}
function get_arcstat # stat
{
typeset stat=$1
case $(uname) in
FreeBSD)
kstat arcstats.$stat
;;
Linux)
kstat arcstats | awk "/$stat/ { print \$3 }"
;;
*)
false
;;
esac
}
+function punch_hole # offset length file
+{
+ typeset offset=$1
+ typeset length=$2
+ typeset file=$3
+
+ case $(uname) in
+ FreeBSD)
+ truncate -d -o $offset -l $length "$file"
+ ;;
+ Linux)
+ fallocate --punch-hole --offset $offset --length $length "$file"
+ ;;
+ *)
+ false
+ ;;
+ esac
+}
+
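# Illustrative sketch (commented out): punch a 1 MiB hole at offset 1 MiB in
# a placeholder test file; on-disk usage should then drop below the apparent
# file size.
#
#	log_must punch_hole 1048576 1048576 $TESTDIR/testfile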
#
# Wait for the specified arcstat to reach non-zero quiescence.
# If echo is 1 echo the value after reaching quiescence, otherwise
# if echo is 0 print the arcstat we are waiting on.
#
function arcstat_quiescence # stat echo
{
typeset stat=$1
typeset echo=$2
typeset do_once=true
if [[ $echo -eq 0 ]]; then
echo "Waiting for arcstat $1 quiescence."
fi
while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do
typeset stat1=$(get_arcstat $stat)
sleep 2
typeset stat2=$(get_arcstat $stat)
do_once=false
done
if [[ $echo -eq 1 ]]; then
echo $stat2
fi
}
function arcstat_quiescence_noecho # stat
{
typeset stat=$1
arcstat_quiescence $stat 0
}
function arcstat_quiescence_echo # stat
{
typeset stat=$1
arcstat_quiescence $stat 1
}
#
# Given an array of pids, wait until all processes
# have completed and check their return status.
#
function wait_for_children #children
{
rv=0
children=("$@")
for child in "${children[@]}"
do
child_exit=0
wait ${child} || child_exit=$?
if [ $child_exit -ne 0 ]; then
echo "child ${child} failed with ${child_exit}"
rv=1
fi
done
return $rv
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/Makefile.am
index 137cddd5f784..fd586ecee518 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/Makefile.am
@@ -1,92 +1,93 @@
SUBDIRS = \
acl \
alloc_class \
arc \
atime \
bootfs \
btree \
cache \
cachefile \
casenorm \
channel_program \
chattr \
checksum \
clean_mirror \
cli_root \
cli_user \
compression \
cp_files \
crtime \
ctime \
deadman \
delegate \
devices \
events \
exec \
fallocate \
fault \
features \
grow \
history \
hkdf \
inheritance \
inuse \
io \
l2arc \
large_files \
largest_pool \
libzfs \
limits \
link_count \
log_spacemap \
migration \
mmap \
mmp \
mount \
mv_files \
nestedfs \
no_space \
nopwrite \
online_offline \
pam \
pool_checkpoint \
pool_names \
poolversion \
privilege \
procfs \
projectquota \
pyzfs \
quota \
raidz \
redacted_send \
redundancy \
refquota \
refreserv \
removal \
rename_dirs \
replacement \
reservation \
rootpool \
rsend \
scrub_mirror \
slog \
snapshot \
snapused \
sparse \
suid \
threadsappend \
trim \
truncate \
upgrade \
user_namespace \
userquota \
vdev_zaps \
write_dirs \
xattr \
zpool_influxdb \
zvol
if BUILD_LINUX
SUBDIRS += \
+ simd \
tmpfile
endif
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/alloc_class/alloc_class.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/alloc_class/alloc_class.kshlib
index 4c64cff69643..e204f43b3bcd 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/alloc_class/alloc_class.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/alloc_class/alloc_class.kshlib
@@ -1,67 +1,68 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017, Intel Corporation.
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/alloc_class/alloc_class.cfg
function disk_setup
{
truncate -s $ZPOOL_DEVSIZE $ZPOOL_DISKS
truncate -s $CLASS_DEVSIZE $CLASS_DISKS
}
function disk_cleanup
{
rm -f $ZPOOL_DEVSIZE $ZPOOL_DISKS 2> /dev/null
rm -f $CLASS_DEVSIZE $CLASS_DISKS 2> /dev/null
}
function cleanup
{
if datasetexists $TESTPOOL ; then
zpool destroy -f $TESTPOOL 2> /dev/null
fi
disk_cleanup
}
#
# Try zpool status/iostat for given pool
#
# $1 pool
#
function display_status
{
typeset pool=$1
typeset -i ret=0
zpool status -xv $pool > /dev/null 2>&1
ret=$?
zpool iostat > /dev/null 2>&1
((ret |= $?))
typeset mntpnt=$(get_prop mountpoint $pool)
dd if=/dev/random of=$mntpnt/testfile.$$ &
typeset pid=$!
zpool iostat -v 1 3 > /dev/null
((ret |= $?))
kill -9 $pid
+ wait $pid 2> /dev/null
return $ret
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/HEXKEY b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/HEXKEY
new file mode 100644
index 000000000000..95ed1c051a21
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/HEXKEY
@@ -0,0 +1 @@
+000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/Makefile.am
index 06b4239a6d96..7dfec435ce7f 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/Makefile.am
@@ -1,14 +1,18 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zfs_load-key
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
zfs_load-key.ksh \
zfs_load-key_all.ksh \
zfs_load-key_file.ksh \
+ zfs_load-key_https.ksh \
zfs_load-key_location.ksh \
zfs_load-key_noop.ksh \
zfs_load-key_recursive.ksh
dist_pkgdata_DATA = \
zfs_load-key.cfg \
- zfs_load-key_common.kshlib
+ zfs_load-key_common.kshlib \
+ PASSPHRASE \
+ HEXKEY \
+ RAWKEY
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/PASSPHRASE b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/PASSPHRASE
new file mode 100644
index 000000000000..f3097ab13082
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/PASSPHRASE
@@ -0,0 +1 @@
+password
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/RAWKEY b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/RAWKEY
new file mode 100644
index 000000000000..f2d4cbf581ce
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/RAWKEY
@@ -0,0 +1 @@
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
\ No newline at end of file
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/cleanup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/cleanup.ksh
index 79cd6e9f908e..d397bcf4e9f0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/cleanup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/cleanup.ksh
@@ -1,30 +1,32 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+cleanup_https
default_cleanup
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/setup.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/setup.ksh
index 6a9af3bc28c3..6cc5528ce5d7 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/setup.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/setup.ksh
@@ -1,32 +1,35 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
DISK=${DISKS%% *}
-default_setup $DISK
+default_setup_noexit $DISK
+setup_https
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key.cfg b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key.cfg
index 90d9f63f1dba..cc1e3b330543 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key.cfg
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key.cfg
@@ -1,26 +1,57 @@
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
+# $PASSPHRASE, $HEXKEY, and $RAWKEY must be kept in sync
+# with the corresponding files in this directory
+
export PASSPHRASE="password"
export PASSPHRASE1="password1"
export PASSPHRASE2="password2"
export HEXKEY="000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F"
export HEXKEY1="201F1E1D1C1B1A191817161514131211100F0E0D0C0B0A090807060504030201"
export RAWKEY="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
export RAWKEY1="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
+
+export SSL_CA_CERT_FILE="/$TESTPOOL/snakeoil.crt"
+export HTTPS_PORT_FILE="/$TESTPOOL/snakeoil.port"
+export HTTPS_HOSTNAME="localhost"
+export HTTPS_PORT=
+export HTTPS_BASE_URL=
+
+function get_https_port
+{
+ if [ -z "$HTTPS_PORT" ]; then
+ read -r HTTPS_PORT < "$HTTPS_PORT_FILE" || return
+ fi
+
+ echo "$HTTPS_PORT"
+}
+
+function get_https_base_url
+{
+ if [ -z "$HTTPS_BASE_URL" ]; then
+ HTTPS_BASE_URL="https://$HTTPS_HOSTNAME:$(get_https_port)" || {
+ typeset ret=$?
+ HTTPS_BASE_URL=
+ return $ret
+ }
+ fi
+
+ echo "$HTTPS_BASE_URL"
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_all.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_all.ksh
index 5e330eb0deb9..3c18e4538d34 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_all.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_all.ksh
@@ -1,76 +1,85 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
#
# DESCRIPTION:
# 'zfs load-key -a' should load keys for all datasets.
#
# STRATEGY:
# 1. Create an encrypted filesystem, encrypted zvol, and an encrypted pool
# 2. Unmount all datasets and unload their keys
# 3. Attempt to load all dataset keys
# 4. Verify each dataset has its key loaded
# 5. Attempt to mount the pool and filesystem
#
verify_runnable "both"
function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && destroy_dataset $TESTPOOL/$TESTFS1
+ datasetexists $TESTPOOL/$TESTFS2 && destroy_dataset $TESTPOOL/$TESTFS2
datasetexists $TESTPOOL/zvol && destroy_dataset $TESTPOOL/zvol
poolexists $TESTPOOL1 && log_must destroy_pool $TESTPOOL1
}
log_onexit cleanup
log_assert "'zfs load-key -a' should load keys for all datasets"
log_must eval "echo $PASSPHRASE1 > /$TESTPOOL/pkey"
log_must zfs create -o encryption=on -o keyformat=passphrase \
-o keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS1
+log_must zfs create -o encryption=on -o keyformat=passphrase \
+ -o keylocation=$(get_https_base_url)/PASSPHRASE $TESTPOOL/$TESTFS2
+
log_must zfs create -V 64M -o encryption=on -o keyformat=passphrase \
-o keylocation=file:///$TESTPOOL/pkey $TESTPOOL/zvol
typeset DISK2="$(echo $DISKS | awk '{ print $2}')"
log_must zpool create -O encryption=on -O keyformat=passphrase \
-O keylocation=file:///$TESTPOOL/pkey $TESTPOOL1 $DISK2
log_must zfs unmount $TESTPOOL/$TESTFS1
log_must_busy zfs unload-key $TESTPOOL/$TESTFS1
+log_must zfs unmount $TESTPOOL/$TESTFS2
+log_must_busy zfs unload-key $TESTPOOL/$TESTFS2
+
log_must_busy zfs unload-key $TESTPOOL/zvol
log_must zfs unmount $TESTPOOL1
log_must_busy zfs unload-key $TESTPOOL1
log_must zfs load-key -a
log_must key_available $TESTPOOL1
log_must key_available $TESTPOOL/zvol
log_must key_available $TESTPOOL/$TESTFS1
+log_must key_available $TESTPOOL/$TESTFS2
log_must zfs mount $TESTPOOL1
log_must zfs mount $TESTPOOL/$TESTFS1
+log_must zfs mount $TESTPOOL/$TESTFS2
log_pass "'zfs load-key -a' loads keys for all datasets"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
index d9066f9cbf57..f7461437c615 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
@@ -1,101 +1,164 @@
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key.cfg
# Return 0 if a dataset key is available, 1 otherwise
#
# $1 - dataset
#
function key_available
{
typeset ds=$1
datasetexists $ds || return 1
typeset val=$(get_prop keystatus $ds)
if [[ "$val" == "none" ]]; then
log_note "Dataset $ds is not encrypted"
elif [[ "$val" == "available" ]]; then
return 0
fi
return 1
}
function key_unavailable
{
key_available $1 && return 1
return 0
}
function verify_keyformat
{
typeset ds=$1
typeset format=$2
typeset fmt=$(get_prop keyformat $ds)
if [[ "$fmt" != "$format" ]]; then
log_fail "Expected keyformat $format, got $fmt"
fi
return 0
}
function verify_keylocation
{
typeset ds=$1
typeset location=$2
typeset keyloc=$(get_prop keylocation $ds)
if [[ "$keyloc" != "$location" ]]; then
log_fail "Expected keylocation $location, got $keyloc"
fi
return 0
}
function verify_encryption_root
{
typeset ds=$1
typeset val=$2
typeset eroot=$(get_prop encryptionroot $ds)
if [[ "$eroot" != "$val" ]]; then
log_note "Expected encryption root '$val', got '$eroot'"
return 1
fi
return 0
}
function verify_origin
{
typeset ds=$1
typeset val=$2
typeset orig=$(get_prop origin $ds)
if [[ "$orig" != "$val" ]]; then
log_note "Expected origin '$val', got '$orig'"
return 1
fi
return 0
}
+
+function setup_https
+{
+ log_must openssl req -x509 -newkey rsa:4096 -sha256 -days 1 -nodes -keyout "/$TESTPOOL/snakeoil.key" -out "$SSL_CA_CERT_FILE" -subj "/CN=$HTTPS_HOSTNAME"
+
+ python3 -uc "
+import http.server, ssl, sys, os, time, random
+
+sys.stdin.close()
+
+httpd, err, port = None, None, None
+for i in range(1, 100):
+ port = random.randint(0xC000, 0xFFFF) # ephemeral range
+ try:
+ httpd = http.server.HTTPServer(('$HTTPS_HOSTNAME', port), http.server.SimpleHTTPRequestHandler)
+ break
+ except:
+ err = sys.exc_info()[1]
+ time.sleep(i / 100)
+if not httpd:
+ raise err
+
+with open('$HTTPS_PORT_FILE', 'w') as portf:
+ print(port, file=portf)
+
+httpd.socket = ssl.wrap_socket(httpd.socket, server_side=True, keyfile='/$TESTPOOL/snakeoil.key', certfile='$SSL_CA_CERT_FILE', ssl_version=ssl.PROTOCOL_TLS)
+
+os.chdir('$STF_SUITE/tests/functional/cli_root/zfs_load-key')
+
+with open('/$TESTPOOL/snakeoil.pid', 'w') as pidf:
+ if os.fork() != 0:
+ os._exit(0)
+ print(os.getpid(), file=pidf)
+
+sys.stdout.close()
+sys.stderr.close()
+try:
+ sys.stdout = sys.stderr = open('/tmp/ZTS-snakeoil.log', 'w', buffering=1) # line
+except:
+ sys.stdout = sys.stderr = open('/dev/null', 'w')
+
+print('{} start on {}'.format(os.getpid(), port))
+httpd.serve_forever()
+" || log_fail
+
+ typeset https_pid=
+ for d in $(seq 0 0.1 5); do
+ read -r https_pid 2>/dev/null < "/$TESTPOOL/snakeoil.pid" && [ -n "$https_pid" ] && break
+ sleep "$d"
+ done
+ [ -z "$https_pid" ] && log_fail "Couldn't start HTTPS server"
+ log_note "Started HTTPS server as $https_pid on port $(get_https_port)"
+}
+
+function cleanup_https
+{
+ typeset https_pid=
+ read -r https_pid 2>/dev/null < "/$TESTPOOL/snakeoil.pid" || return 0
+
+ log_must kill "$https_pid"
+ cat /tmp/ZTS-snakeoil.log
+ rm -f "/$TESTPOOL/snakeoil.pid" "/tmp/ZTS-snakeoil.log"
+}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_https.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_https.ksh
new file mode 100755
index 000000000000..cac9c4140322
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_https.ksh
@@ -0,0 +1,78 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
+
+#
+# DESCRIPTION:
+# 'zfs load-key' should load a dataset's key from an https:// URL,
+# but fail to do so if the domain doesn't exist or the file 404s.
+#
+# STRATEGY:
+# 1. Try to create a dataset pointing to an RFC6761-guaranteed unresolvable domain,
+# one to the sshd port (which will be either unoccupied (ECONNREFUSED)
+# or have sshd on it ("wrong version number")),
+# and one pointing to a URL that will always 404.
+# 2. Create encrypted datasets with keylocation=https://address
+# 3. Unmount the datasets and unload their keys
+# 4. Attempt to load the keys
+# 5. Verify the keys are loaded
+# 6. Attempt to mount the datasets
+#
+
+verify_runnable "both"
+
+function cleanup
+{
+ for fs in "$TESTFS1" "$TESTFS2" "$TESTFS3"; do
+ datasetexists $TESTPOOL/$fs && \
+ log_must zfs destroy $TESTPOOL/$fs
+ done
+}
+log_onexit cleanup
+
+log_assert "'zfs load-key' should load a key from a file"
+
+log_mustnot zfs create -o encryption=on -o keyformat=passphrase \
+ -o keylocation=https://invalid./where-ever $TESTPOOL/$TESTFS1
+
+log_mustnot zfs create -o encryption=on -o keyformat=passphrase \
+ -o keylocation=https://$HTTPS_HOSTNAME:22 $TESTPOOL/$TESTFS1
+
+log_mustnot zfs create -o encryption=on -o keyformat=passphrase \
+ -o keylocation=$(get_https_base_url)/ENOENT $TESTPOOL/$TESTFS1
+
+log_must zfs create -o encryption=on -o keyformat=passphrase \
+ -o keylocation=$(get_https_base_url)/PASSPHRASE $TESTPOOL/$TESTFS1
+
+log_must zfs create -o encryption=on -o keyformat=hex \
+ -o keylocation=$(get_https_base_url)/HEXKEY $TESTPOOL/$TESTFS2
+
+log_must zfs create -o encryption=on -o keyformat=raw \
+ -o keylocation=$(get_https_base_url)/RAWKEY $TESTPOOL/$TESTFS3
+
+for fs in "$TESTFS1" "$TESTFS2" "$TESTFS3"; do
+ log_must zfs unmount $TESTPOOL/$fs
+ log_must zfs unload-key $TESTPOOL/$fs
+done
+for fs in "$TESTFS1" "$TESTFS2" "$TESTFS3"; do
+ log_must zfs load-key $TESTPOOL/$fs
+ log_must key_available $TESTPOOL/$fs
+ log_must zfs mount $TESTPOOL/$fs
+done
+
+log_pass "'zfs load-key' loads a key from a file"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_location.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_location.ksh
index 8538143cb62e..11f16e45ad3f 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_location.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_location.ksh
@@ -1,73 +1,78 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
#
# DESCRIPTION:
# 'zfs load-key -L' should override keylocation with provided value.
#
# STRATEGY:
# 1. Create a key file
# 2. Copy the key file to another location
# 3. Create an encrypted dataset using the keyfile
# 4. Unmount the dataset and unload its key
# 5. Attempt to load the dataset specifying a keylocation of file
# 6. Verify the key is loaded
# 7. Verify the keylocation is the original key file
# 8. Unload the dataset's key
# 9. Attempt to load the dataset specifying a keylocation of prompt
# 10. Verify the key is loaded
# 11. Verify the keylocation is the original key file
#
verify_runnable "both"
function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && \
destroy_dataset $TESTPOOL/$TESTFS1
}
log_onexit cleanup
log_assert "'zfs load-key -L' should override keylocation with provided value"
typeset key_location="/$TESTPOOL/pkey1"
log_must eval "echo $PASSPHRASE > $key_location"
log_must cp $key_location /$TESTPOOL/pkey2
log_must zfs create -o encryption=on -o keyformat=passphrase \
-o keylocation=file://$key_location $TESTPOOL/$TESTFS1
log_must zfs unmount $TESTPOOL/$TESTFS1
log_must zfs unload-key $TESTPOOL/$TESTFS1
log_must zfs load-key -L file:///$TESTPOOL/pkey2 $TESTPOOL/$TESTFS1
log_must key_available $TESTPOOL/$TESTFS1
log_must verify_keylocation $TESTPOOL/$TESTFS1 "file://$key_location"
log_must zfs unload-key $TESTPOOL/$TESTFS1
log_must eval "echo $PASSPHRASE | zfs load-key -L prompt $TESTPOOL/$TESTFS1"
log_must key_available $TESTPOOL/$TESTFS1
log_must verify_keylocation $TESTPOOL/$TESTFS1 "file://$key_location"
+log_must zfs unload-key $TESTPOOL/$TESTFS1
+log_must zfs load-key -L $(get_https_base_url)/PASSPHRASE $TESTPOOL/$TESTFS1
+log_must key_available $TESTPOOL/$TESTFS1
+log_must verify_keylocation $TESTPOOL/$TESTFS1 "file://$key_location"
+
log_pass "'zfs load-key -L' overrides keylocation with provided value"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh
index 54c390f2737f..c0b5553e39c9 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_load-key/zfs_load-key_recursive.ksh
@@ -1,66 +1,72 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
#
# DESCRIPTION:
# 'zfs load-key -r' should recursively load keys.
#
# STRATEGY:
# 1. Create an encrypted dataset
# 2. Create a child dataset as an encryption root
# 3. Unmount all datasets and unload their keys
# 4. Attempt to load all dataset keys
# 5. Verify each dataset has its key loaded
# 6. Attempt to mount each dataset
#
verify_runnable "both"
function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && \
destroy_dataset $TESTPOOL/$TESTFS1 -r
}
log_onexit cleanup
log_assert "'zfs load-key -r' should recursively load keys"
log_must eval "echo $PASSPHRASE1 > /$TESTPOOL/pkey"
log_must zfs create -o encryption=on -o keyformat=passphrase \
-o keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS1
log_must zfs create -o keyformat=passphrase \
-o keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS1/child
+log_must zfs create -o keyformat=passphrase \
+ -o keylocation=$(get_https_base_url)/PASSPHRASE $TESTPOOL/$TESTFS1/child/child
+
log_must zfs unmount $TESTPOOL/$TESTFS1
+log_must zfs unload-key $TESTPOOL/$TESTFS1/child/child
log_must zfs unload-key $TESTPOOL/$TESTFS1/child
log_must zfs unload-key $TESTPOOL/$TESTFS1
log_must zfs load-key -r $TESTPOOL
log_must key_available $TESTPOOL/$TESTFS1
log_must key_available $TESTPOOL/$TESTFS1/child
+log_must key_available $TESTPOOL/$TESTFS1/child/child
log_must zfs mount $TESTPOOL/$TESTFS1
log_must zfs mount $TESTPOOL/$TESTFS1/child
+log_must zfs mount $TESTPOOL/$TESTFS1/child/child
log_pass "'zfs load-key -r' recursively loads keys"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile.am
index 2b281e71c210..773e9f5a5585 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile.am
@@ -1,32 +1,33 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zfs_receive
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
zfs_receive_001_pos.ksh \
zfs_receive_002_pos.ksh \
zfs_receive_003_pos.ksh \
zfs_receive_004_neg.ksh \
zfs_receive_005_neg.ksh \
zfs_receive_006_pos.ksh \
zfs_receive_007_neg.ksh \
zfs_receive_008_pos.ksh \
zfs_receive_009_neg.ksh \
zfs_receive_010_pos.ksh \
zfs_receive_011_pos.ksh \
zfs_receive_012_pos.ksh \
zfs_receive_013_pos.ksh \
zfs_receive_014_pos.ksh \
zfs_receive_015_pos.ksh \
zfs_receive_016_pos.ksh \
receive-o-x_props_override.ksh \
zfs_receive_from_encrypted.ksh \
zfs_receive_from_zstd.ksh \
zfs_receive_new_props.ksh \
zfs_receive_to_encrypted.ksh \
zfs_receive_raw.ksh \
zfs_receive_raw_incremental.ksh \
zfs_receive_raw_-d.ksh \
- zfs_receive_-e.ksh
+ zfs_receive_-e.ksh \
+ zfs_receive_-wR-encrypted-mix.ksh
dist_pkgdata_DATA = \
zstd_test_data.txt
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh
new file mode 100755
index 000000000000..6e27130e0248
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_-wR-encrypted-mix.ksh
@@ -0,0 +1,75 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2022 by Attila Fülöp <attila@fueloep.org>
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+#
+# DESCRIPTION:
+# ZFS should receive a raw send of a mix of unencrypted and encrypted
+# child datasets
+#
+# The layout of the datasets is: enc/unenc/enc/unenc
+#
+# STRATEGY:
+# 1. Create the dataset hierarchy
+# 2. Snapshot the dataset hierarchy
+# 3. Send -Rw the dataset hierarchy and receive into a top-level dataset
+# 4. Check the encryption property of the received datasets
+
+verify_runnable "both"
+
+function cleanup
+{
+ datasetexists "$TESTPOOL/$TESTFS1" && \
+ destroy_dataset "$TESTPOOL/$TESTFS1" -r
+
+ datasetexists "$TESTPOOL/$TESTFS2" && \
+ destroy_dataset "$TESTPOOL/$TESTFS2" -r
+}
+
+log_onexit cleanup
+
+log_assert "ZFS should receive a mix of un/encrypted childs"
+
+typeset src="$TESTPOOL/$TESTFS1"
+typeset dst="$TESTPOOL/$TESTFS2"
+typeset snap="snap"
+
+echo "password" | \
+ create_dataset "$src" -o encryption=on -o keyformat=passphrase
+create_dataset "$src/u" "-o encryption=off"
+echo "password" | \
+ create_dataset "$src/u/e" -o encryption=on -o keyformat=passphrase
+create_dataset "$src/u/e/u" -o encryption=off
+
+log_must zfs snapshot -r "$src@$snap"
+log_must eval "zfs send -Rw $src@$snap | zfs receive -u $dst"
+log_must test "$(get_prop 'encryption' $dst)" != "off"
+log_must test "$(get_prop 'encryption' $dst/u)" == "off"
+log_must test "$(get_prop 'encryption' $dst/u/e)" != "off"
+log_must test "$(get_prop 'encryption' $dst/u/e/u)" == "off"
+
+log_pass "ZFS can receive a mix of un/encrypted childs"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh
index 526497401f28..5d76c220fc45 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_receive/zfs_receive_to_encrypted.ksh
@@ -1,77 +1,87 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# ZFS should receive to an encrypted child dataset.
#
# STRATEGY:
# 1. Snapshot the default dataset
# 2. Create an encrypted dataset
# 3. Attempt to receive a stream to an encrypted child
# 4. Attempt to receive a stream with properties to an encrypted child
# 5. Attempt to receive a replication stream to an encrypted child
# 6. Unmount and unload the encrypted dataset keys
# 7. Attempt to receive a snapshot stream to an encrypted child
#
verify_runnable "both"
function cleanup
{
snapexists $snap && destroy_dataset $snap -f
datasetexists $TESTPOOL/$TESTFS1 && \
destroy_dataset $TESTPOOL/$TESTFS1 -r
}
log_onexit cleanup
log_assert "ZFS should receive encrypted filesystems into child dataset"
typeset passphrase="password"
typeset snap="$TESTPOOL/$TESTFS@snap"
typeset testfile="testfile"
log_must zfs snapshot $snap
log_must eval "echo $passphrase | zfs create -o encryption=on" \
"-o keyformat=passphrase $TESTPOOL/$TESTFS1"
log_note "Verifying ZFS will receive to an encrypted child"
log_must eval "zfs send $snap | zfs receive $TESTPOOL/$TESTFS1/c1"
+log_must test "$(get_prop 'encryption' $TESTPOOL/$TESTFS1/c1)" != "off"
-log_note "Verifying 'send -p' will receive to an encrypted child"
+# Unload the key; the following tests do not require it, and unloading it
+# also exercises the key checks performed at receive time.
+log_must zfs unmount $TESTPOOL/$TESTFS1
+log_must zfs unload-key $TESTPOOL/$TESTFS1
+
+log_note "Verifying 'send -p' will receive to an unencrypted child"
log_must eval "zfs send -p $snap | zfs receive $TESTPOOL/$TESTFS1/c2"
log_must test "$(get_prop 'encryption' $TESTPOOL/$TESTFS1/c2)" == "off"
-log_note "Verifying 'send -R' will receive to an encrypted child"
+# For completeness add the property override case.
+log_note "Verifying recv -o encyption=off' will receive to an unencrypted child"
+log_must eval "zfs send $snap | \
+ zfs receive -o encryption=off $TESTPOOL/$TESTFS1/c2o"
+log_must test "$(get_prop 'encryption' $TESTPOOL/$TESTFS1/c2o)" == "off"
+
+log_note "Verifying 'send -R' will receive to an unencrypted child"
log_must eval "zfs send -R $snap | zfs receive $TESTPOOL/$TESTFS1/c3"
log_must test "$(get_prop 'encryption' $TESTPOOL/$TESTFS1/c3)" == "off"
log_note "Verifying ZFS will not receive to an encrypted child when the" \
"parent key is unloaded"
-log_must zfs unmount $TESTPOOL/$TESTFS1
-log_must zfs unload-key $TESTPOOL/$TESTFS1
log_mustnot eval "zfs send $snap | zfs receive $TESTPOOL/$TESTFS1/c4"
log_pass "ZFS can receive encrypted filesystems into child dataset"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_keylocation.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_keylocation.ksh
index d8a2fcbdf9b3..9791339479d7 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_keylocation.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_set/zfs_set_keylocation.ksh
@@ -1,102 +1,98 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_load-key/zfs_load-key_common.kshlib
#
# DESCRIPTION:
# Unencrypted datasets should only allow keylocation of 'none', encryption
# roots should only allow keylocation of 'prompt', a file URI, or an https URL,
# and encrypted child datasets should not be able to change their keylocation.
#
# STRATEGY:
# 1. Verify the key location of the default dataset is 'none'
# 2. Attempt to change the key location of the default dataset
# 3. Create an encrypted dataset using a key file
# 4. Attempt to change the key location of the encrypted dataset to 'none',
# an invalid location, its current location, and 'prompt'
# 5. Attempt to reload the encrypted dataset key using the new key location
# 6. Create a encrypted child dataset
# 7. Verify the key location of the child dataset is 'none'
# 8. Attempt to change the key location of the child dataset
# 9. Verify the key location of the child dataset has not changed
#
verify_runnable "both"
function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && \
destroy_dataset $TESTPOOL/$TESTFS1 -r
cleanup_https
}
log_onexit cleanup
-log_assert "Key location can only be 'prompt' or a file path for encryption" \
- "roots, and 'none' for unencrypted volumes"
+log_assert "Key location can only be 'prompt', 'file://', or 'https://'" \
+ "for encryption roots, and 'none' for unencrypted volumes"
log_must eval "echo $PASSPHRASE > /$TESTPOOL/pkey"
log_must verify_keylocation $TESTPOOL/$TESTFS "none"
log_must zfs set keylocation=none $TESTPOOL/$TESTFS
log_mustnot zfs set keylocation=/$TESTPOOL/pkey $TESTPOOL/$TESTFS
log_mustnot zfs set keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS
log_must verify_keylocation $TESTPOOL/$TESTFS "none"
log_must zfs create -o encryption=on -o keyformat=passphrase \
-o keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS1
log_mustnot zfs set keylocation=none $TESTPOOL/$TESTFS1
-if true; then
- log_mustnot zfs set keylocation=/$TESTPOOL/pkey $TESTPOOL/$TESTFS1
-else
- ### SOON: ###
- # file:///$TESTPOOL/pkey and /$TESTPOOL/pkey are equivalent on FreeBSD
- # thanks to libfetch. Eventually we want to make the other platforms
- # work this way as well, either by porting libfetch or by other means.
- log_must zfs set keylocation=/$TESTPOOL/pkey $TESTPOOL/$TESTFS1
-fi
+log_mustnot zfs set keylocation=/$TESTPOOL/pkey $TESTPOOL/$TESTFS1
log_must zfs set keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS1
log_must verify_keylocation $TESTPOOL/$TESTFS1 "file:///$TESTPOOL/pkey"
+setup_https
+log_must zfs set keylocation=$(get_https_base_url)/PASSPHRASE $TESTPOOL/$TESTFS1
+log_must verify_keylocation $TESTPOOL/$TESTFS1 "$(get_https_base_url)/PASSPHRASE"
+
log_must zfs set keylocation=prompt $TESTPOOL/$TESTFS1
log_must verify_keylocation $TESTPOOL/$TESTFS1 "prompt"
log_must zfs unmount $TESTPOOL/$TESTFS1
log_must zfs unload-key $TESTPOOL/$TESTFS1
log_must rm /$TESTPOOL/pkey
log_must eval "echo $PASSPHRASE | zfs load-key $TESTPOOL/$TESTFS1"
log_must zfs mount $TESTPOOL/$TESTFS1
log_must zfs create $TESTPOOL/$TESTFS1/child
log_must verify_keylocation $TESTPOOL/$TESTFS1/child "none"
log_mustnot zfs set keylocation=none $TESTPOOL/$TESTFS1/child
log_mustnot zfs set keylocation=prompt $TESTPOOL/$TESTFS1/child
log_mustnot zfs set keylocation=file:///$TESTPOOL/pkey $TESTPOOL/$TESTFS1/child
log_mustnot zfs set keylocation=/$TESTPOOL/pkey $TESTPOOL/$TESTFS1/child
log_must verify_keylocation $TESTPOOL/$TESTFS1/child "none"
-log_pass "Key location can only be 'prompt' or a file path for encryption" \
- "roots, and 'none' for unencrypted volumes"
+log_pass "Key location can only be 'prompt', 'file://', or 'https://'" \
+ "for encryption roots, and 'none' for unencrypted volumes"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
index 4645e245c973..a6833f167c66 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
@@ -1,152 +1,153 @@
#!/bin/ksh -p
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# DESCRIPTION:
# Verify the number of IO and checksum events match the error counters
# in zpool status.
#
# STRATEGY:
-# 1. Create a raidz or mirror pool
+# 1. Create a mirror, raidz, or draid pool
# 2. Inject read/write IO errors or checksum errors
# 3. Verify the number of errors in zpool status match the corresponding
# number of error events.
-# 4. Repeat for all combinations of raidz/mirror and io/checksum errors.
+# 4. Repeat for all combinations of mirror/raidz/draid and io/checksum
+# errors.
#
. $STF_SUITE/include/libtest.shlib
verify_runnable "both"
MOUNTDIR=$TEST_BASE_DIR/mount
VDEV1=$TEST_BASE_DIR/file1
VDEV2=$TEST_BASE_DIR/file2
VDEV3=$TEST_BASE_DIR/file3
POOL=error_pool
FILESIZE=$((20 * 1024 * 1024))
OLD_CHECKSUMS=$(get_tunable CHECKSUM_EVENTS_PER_SECOND)
OLD_LEN_MAX=$(get_tunable ZEVENT_LEN_MAX)
function cleanup
{
log_must set_tunable64 CHECKSUM_EVENTS_PER_SECOND $OLD_CHECKSUMS
log_must set_tunable64 ZEVENT_LEN_MAX $OLD_LEN_MAX
log_must zinject -c all
log_must zpool events -c
if poolexists $POOL ; then
log_must destroy_pool $POOL
fi
log_must rm -f $VDEV1 $VDEV2 $VDEV3
}
log_assert "Check that the number of zpool errors match the number of events"
log_onexit cleanup
# Set our thresholds high so we never ratelimit or drop events.
set_tunable64 CHECKSUM_EVENTS_PER_SECOND 20000
set_tunable64 ZEVENT_LEN_MAX 20000
log_must truncate -s $MINVDEVSIZE $VDEV1 $VDEV2 $VDEV3
log_must mkdir -p $MOUNTDIR
# Run error test on a specific type of pool
#
-# $1: pool - raidz, mirror
+# $1: pool - mirror, raidz, draid
# $2: test type - corrupt (checksum error), io
# $3: read, write
function do_test
{
POOLTYPE=$1
ERR=$2
RW=$3
log_note "Testing $ERR $RW on $POOLTYPE"
log_must zpool create -f -m $MOUNTDIR -o failmode=continue $POOL $POOLTYPE $VDEV1 $VDEV2 $VDEV3
log_must zpool events -c
log_must zfs set compression=off $POOL
if [ "$RW" == "read" ] ; then
log_must mkfile $FILESIZE $MOUNTDIR/file
fi
log_must zinject -d $VDEV1 -e $ERR -T $RW -f 100 $POOL
if [ "$RW" == "write" ] ; then
log_must mkfile $FILESIZE $MOUNTDIR/file
log_must zpool sync $POOL
else
log_must zpool scrub $POOL
wait_scrubbed $POOL
fi
log_must zinject -c all
# Wait for the pool to settle down and finish resilvering (if
# necessary). We want the errors to stop incrementing before we
# check the error and event counts.
while is_pool_resilvering $POOL ; do
sleep 1
done
out="$(zpool status -p | grep $VDEV1)"
if [ "$ERR" == "corrupt" ] ; then
events=$(zpool events | grep checksum | wc -l)
val=$(echo "$out" | awk '{print $5}')
str="checksum"
elif [ "$ERR" == "io" ] ; then
allevents=$(zpool events | grep io)
events=$(echo "$allevents" | wc -l)
if [ "$RW" == "read" ] ; then
str="read IO"
val=$(echo "$out" | awk '{print $3}')
else
str="write IO"
val=$(echo "$out" | awk '{print $4}')
fi
fi
if [ -z "$val" -o $val -eq 0 -o -z "$events" -o $events -eq 0 ] ; then
log_fail "Didn't see any errors or events ($val/$events)"
fi
if [ $val -ne $events ] ; then
log_fail "$val $POOLTYPE $str errors != $events events"
else
log_note "$val $POOLTYPE $str errors == $events events"
fi
log_must zpool destroy $POOL
}
-# Test all types of errors on mirror and raidz pools
-for pooltype in mirror raidz ; do
+# Test all types of errors on mirror, raidz, and draid pools
+for pooltype in mirror raidz draid; do
do_test $pooltype corrupt read
do_test $pooltype io read
do_test $pooltype io write
done
log_pass "The number of errors matched the number of events"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh
index 922e35125e4a..6bbd46289f7c 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_001_pos.ksh
@@ -1,177 +1,177 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_expand/zpool_expand.cfg
#
# DESCRIPTION:
# Once 'zpool set autoexpand=on <poolname>' has been run, the pool can grow
# automatically via dynamic vdev expansion.
#
#
# STRATEGY:
# 1) Create three vdevs (loopback, scsi_debug, and file)
# 2) Create pool by using the different devices and set autoexpand=on
# 3) Expand each device as appropriate
# 4) Check that the pool size was expanded
#
# NOTE: Three different device types are used in this test to verify
# expansion of non-partitioned block devices (loopback), partitioned
# block devices (scsi_debug), and non-disk file vdevs. ZFS volumes
# are not used in order to avoid a possible lock inversion when
# layering pools on zvols.
#
verify_runnable "global"
function cleanup
{
poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1
if losetup -a | grep -q $DEV1; then
losetup -d $DEV1
fi
rm -f $FILE_LO $FILE_RAW
block_device_wait
unload_scsi_debug
}
log_onexit cleanup
log_assert "zpool can be autoexpanded after set autoexpand=on on vdev expansion"
-for type in " " mirror raidz draid; do
+for type in " " mirror raidz draid:1s; do
log_note "Setting up loopback, scsi_debug, and file vdevs"
log_must truncate -s $org_size $FILE_LO
DEV1=$(losetup -f)
log_must losetup $DEV1 $FILE_LO
load_scsi_debug $org_size_mb 1 1 1 '512b'
block_device_wait
DEV2=$(get_debug_device)
log_must truncate -s $org_size $FILE_RAW
DEV3=$FILE_RAW
# The -f is required since we're mixing disk and file vdevs.
log_must zpool create -f -o autoexpand=on $TESTPOOL1 $type \
$DEV1 $DEV2 $DEV3
typeset autoexp=$(get_pool_prop autoexpand $TESTPOOL1)
if [[ $autoexp != "on" ]]; then
log_fail "zpool $TESTPOOL1 autoexpand should be on but is " \
"$autoexp"
fi
typeset prev_size=$(get_pool_prop size $TESTPOOL1)
typeset zfs_prev_size=$(get_prop avail $TESTPOOL1)
# Expand each device as appropriate being careful to add an artificial
# delay to ensure we get a single history entry for each. This makes it
# easier to verify each expansion for the striped pool case, since they
# will not be merged into a single larger expansion.
log_note "Expanding loopback, scsi_debug, and file vdevs"
log_must truncate -s $exp_size $FILE_LO
log_must losetup -c $DEV1
sleep 3
echo "2" > /sys/bus/pseudo/drivers/scsi_debug/virtual_gb
echo "1" > /sys/class/block/$DEV2/device/rescan
block_device_wait
sleep 3
log_must truncate -s $exp_size $FILE_RAW
log_must zpool online -e $TESTPOOL1 $FILE_RAW
typeset expand_size=$(get_pool_prop size $TESTPOOL1)
typeset zfs_expand_size=$(get_prop avail $TESTPOOL1)
log_note "$TESTPOOL1 $type has previous size: $prev_size and " \
"expanded size: $expand_size"
# compare available pool size from zfs
if [[ $zfs_expand_size -gt $zfs_prev_size ]]; then
# check for zpool history for the pool size expansion
if [[ $type == " " ]]; then
typeset expansion_size=$(($exp_size-$org_size))
typeset size_addition=$(zpool history -il $TESTPOOL1 |\
grep "pool '$TESTPOOL1' size:" | \
grep "vdev online" | \
grep "(+${expansion_size}" | wc -l)
if [[ $size_addition -ne 3 ]]; then
log_fail "pool $TESTPOOL1 has not expanded, " \
"$size_addition/3 vdevs expanded"
fi
elif [[ $type == "mirror" ]]; then
typeset expansion_size=$(($exp_size-$org_size))
zpool history -il $TESTPOOL1 | \
grep "pool '$TESTPOOL1' size:" | \
grep "vdev online" | \
grep "(+${expansion_size})" >/dev/null 2>&1
if [[ $? -ne 0 ]] ; then
log_fail "pool $TESTPOOL1 has not expanded"
fi
- elif [[ $type == "draid" ]]; then
+ elif [[ $type == "draid:1s" ]]; then
typeset expansion_size=$((2*($exp_size-$org_size)))
zpool history -il $TESTPOOL1 | \
grep "pool '$TESTPOOL1' size:" | \
grep "vdev online" | \
grep "(+${expansion_size})" >/dev/null 2>&1
if [[ $? -ne 0 ]]; then
log_fail "pool $TESTPOOL has not expanded"
fi
else
typeset expansion_size=$((3*($exp_size-$org_size)))
zpool history -il $TESTPOOL1 | \
grep "pool '$TESTPOOL1' size:" | \
grep "vdev online" | \
grep "(+${expansion_size})" >/dev/null 2>&1
if [[ $? -ne 0 ]]; then
log_fail "pool $TESTPOOL has not expanded"
fi
fi
else
log_fail "pool $TESTPOOL1 is not autoexpanded after vdev " \
"expansion. Previous size: $zfs_prev_size and expanded " \
"size: $zfs_expand_size"
fi
cleanup
done
log_pass "zpool can autoexpand if autoexpand=on after vdev expansion"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_devices_missing.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_devices_missing.ksh
index 53828c912ca3..af6ac8d78e4e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_devices_missing.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_devices_missing.ksh
@@ -1,122 +1,122 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_import/zpool_import.kshlib
#
# DESCRIPTION:
# A pool should be importable when up to 2 top-level devices are missing.
#
# STRATEGY:
# 1. Create a pool.
# 2. Write some data to the pool and checksum it.
# 3. Add one or more devices.
# 4. Write more data to the pool and checksum it.
# 5. Export the pool.
# 6. Move added devices out of the devices directory.
# 7. Import the pool with missing devices.
# 8. Verify that the first batch of data is intact.
# 9. Verify that accessing the second batch of data doesn't suspend pool.
# 10. Export the pool, move back missing devices, Re-import the pool.
# 11. Verify that all the data is intact.
#
verify_runnable "global"
function custom_cleanup
{
log_must set_spa_load_verify_metadata 1
log_must set_spa_load_verify_data 1
log_must set_zfs_max_missing_tvds 0
log_must rm -rf $BACKUP_DEVICE_DIR
# Highly damaged pools may fail to be destroyed, so we export them.
poolexists $TESTPOOL1 && log_must zpool export $TESTPOOL1
cleanup
}
log_onexit custom_cleanup
function test_devices_missing
{
typeset poolcreate="$1"
typeset addvdevs="$2"
typeset missingvdevs="$3"
typeset -i missingtvds="$4"
log_note "$0: pool '$poolcreate', adding $addvdevs, then" \
"moving away $missingvdevs."
log_must zpool create $TESTPOOL1 $poolcreate
log_must generate_data $TESTPOOL1 $MD5FILE "first"
log_must zpool add $TESTPOOL1 $addvdevs
log_must generate_data $TESTPOOL1 $MD5FILE2 "second"
- log_must zpool export $TESTPOOL1
+ log_must_busy zpool export $TESTPOOL1
log_must mv $missingvdevs $BACKUP_DEVICE_DIR
# Tell zfs that it is ok to import a pool with missing top-level vdevs
log_must set_zfs_max_missing_tvds $missingtvds
# Missing devices means that data or metadata may be corrupted.
(( missingtvds > 1 )) && log_must set_spa_load_verify_metadata 0
log_must set_spa_load_verify_data 0
log_must zpool import -o readonly=on -d $DEVICE_DIR $TESTPOOL1
log_must verify_data_md5sums $MD5FILE
log_note "Try reading second batch of data, make sure pool doesn't" \
"get suspended."
verify_data_md5sums $MD5FILE >/dev/null 2>&1
- log_must zpool export $TESTPOOL1
+ log_must_busy zpool export $TESTPOOL1
typeset newpaths=$(echo "$missingvdevs" | \
sed "s:$DEVICE_DIR:$BACKUP_DEVICE_DIR:g")
log_must mv $newpaths $DEVICE_DIR
log_must set_spa_load_verify_metadata 1
log_must set_spa_load_verify_data 1
log_must set_zfs_max_missing_tvds 0
log_must zpool import -d $DEVICE_DIR $TESTPOOL1
log_must verify_data_md5sums $MD5FILE
log_must verify_data_md5sums $MD5FILE2
# Cleanup
log_must zpool destroy $TESTPOOL1
log_note ""
}
log_must mkdir -p $BACKUP_DEVICE_DIR
test_devices_missing "$VDEV0" "$VDEV1" "$VDEV1" 1
test_devices_missing "$VDEV0" "$VDEV1 $VDEV2" "$VDEV1" 1
test_devices_missing "mirror $VDEV0 $VDEV1" "mirror $VDEV2 $VDEV3" \
"$VDEV2 $VDEV3" 1
test_devices_missing "$VDEV0 log $VDEV1" "$VDEV2" "$VDEV2" 1
#
# Note that we are testing for 2 non-consecutive missing devices.
# Missing consecutive devices results in missing metadata, and the missing
# metadata can cause the root dataset to fail to mount.
#
test_devices_missing "$VDEV0" "$VDEV1 $VDEV2 $VDEV3" "$VDEV1 $VDEV3" 2
log_pass "zpool import succeeded with missing devices."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh
index 3ac8c104f1ca..d79c757d2406 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh
@@ -1,262 +1,302 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_import/zpool_import.kshlib
#
# DESCRIPTION:
# It should be possible to rewind a pool beyond a configuration change.
#
# STRATEGY:
# 1. Create a pool.
# 2. Generate files and remember their md5sum.
# 3. Note last synced txg.
# 4. Take a snapshot to make sure old blocks are not overwritten.
# 5. Perform zpool add/attach/detach/remove operation.
# 6. Change device paths if requested and re-import pool.
# 7. Checkpoint the pool as one last attempt to preserve old blocks.
# 8. Overwrite the files.
# 9. Export the pool.
# 10. Verify that we can rewind the pool to the noted txg.
# 11. Verify that the files are readable and retain their old data.
#
# DISCLAIMER:
# This test can fail since nothing guarantees that old MOS blocks aren't
# overwritten. Snapshots protect datasets and data files but not the MOS.
# sync_some_data_a_few_times interleaves file data and MOS data for a few
# txgs, thus increasing the odds that some txgs will have their MOS data
# left untouched.
#
verify_runnable "global"
function custom_cleanup
{
set_vdev_validate_skip 0
cleanup
log_must set_tunable64 VDEV_MIN_MS_COUNT 16
}
log_onexit custom_cleanup
function test_common
{
typeset poolcreate="$1"
typeset addvdevs="$2"
typeset attachargs="${3:-}"
typeset detachvdev="${4:-}"
typeset removevdev="${5:-}"
typeset finalpool="${6:-}"
+ typeset retval=1
typeset poolcheck="$poolcreate"
log_must zpool create $TESTPOOL1 $poolcreate
log_must generate_data $TESTPOOL1 $MD5FILE
# syncing a few times while writing new data increases the odds that MOS
# metadata for some of the txgs will survive
log_must sync_some_data_a_few_times $TESTPOOL1
typeset txg
txg=$(get_last_txg_synced $TESTPOOL1)
log_must zfs snapshot -r $TESTPOOL1@snap1
#
# Perform config change operations
#
if [[ -n $addvdevs ]]; then
log_must zpool add -f $TESTPOOL1 $addvdevs
fi
if [[ -n $attachargs ]]; then
log_must zpool attach $TESTPOOL1 $attachargs
fi
if [[ -n $detachvdev ]]; then
log_must zpool detach $TESTPOOL1 $detachvdev
fi
if [[ -n $removevdev ]]; then
[[ -z $finalpool ]] &&
log_fail "Must provide final pool status!"
log_must zpool remove $TESTPOOL1 $removevdev
log_must wait_for_pool_config $TESTPOOL1 "$finalpool"
fi
if [[ -n $pathstochange ]]; then
#
# Change device paths and re-import pool to update labels
#
zpool export $TESTPOOL1
for dev in $pathstochange; do
log_must mv $dev "${dev}_new"
poolcheck=$(echo "$poolcheck" | \
sed "s:$dev:${dev}_new:g")
done
zpool import -d $DEVICE_DIR $TESTPOOL1
fi
#
# In an attempt to leave MOS data untouched so extreme
# rewind is successful during import we checkpoint the
# pool and hope that these MOS data are part of the
# checkpoint (e.g they stay around). If this goes as
# expected, then extreme rewind should rewind back even
# further than the time that we took the checkpoint.
#
# Note that, ideally we would want to take a checkpoint
# right after we record the txg we plan to rewind to.
# But since we can't attach, detach or remove devices
# while having a checkpoint, we take it after the
# operation that changes the config.
#
+ # However, it is possible the MOS data was overwritten, in
+ # which case the pool will either be unimportable or will
+ # have been rewound to a point before the data was written.
+ # In either case an error is returned and test_common()
+ # is retried by the caller to minimize false positives.
+ #
log_must zpool checkpoint $TESTPOOL1
log_must overwrite_data $TESTPOOL1 ""
log_must zpool export $TESTPOOL1
- log_must zpool import -d $DEVICE_DIR -T $txg $TESTPOOL1
- log_must check_pool_config $TESTPOOL1 "$poolcheck"
+ zpool import -d $DEVICE_DIR -T $txg $TESTPOOL1
+ if (( $? == 0 )); then
+ verify_data_md5sums $MD5FILE
+ if (( $? == 0 )); then
+ retval=0
+ fi
- log_must verify_data_md5sums $MD5FILE
+ log_must check_pool_config $TESTPOOL1 "$poolcheck"
+ log_must zpool destroy $TESTPOOL1
+ fi
# Cleanup
- log_must zpool destroy $TESTPOOL1
if [[ -n $pathstochange ]]; then
for dev in $pathstochange; do
log_must mv "${dev}_new" $dev
done
fi
# Fast way to clear vdev labels
log_must zpool create -f $TESTPOOL2 $VDEV0 $VDEV1 $VDEV2 $VDEV3 $VDEV4
log_must zpool destroy $TESTPOOL2
log_note ""
+ return $retval
}
function test_add_vdevs
{
typeset poolcreate="$1"
typeset addvdevs="$2"
log_note "$0: pool '$poolcreate', add $addvdevs."
- test_common "$poolcreate" "$addvdevs"
+ for retry in $(seq 1 5); do
+ test_common "$poolcreate" "$addvdevs" && return
+ log_note "Retry $retry / 5 for test_add_vdevs()"
+ done
+
+ log_fail "Exhausted all 5 retries for test_add_vdevs()"
}
function test_attach_vdev
{
typeset poolcreate="$1"
typeset attachto="$2"
typeset attachvdev="$3"
log_note "$0: pool '$poolcreate', attach $attachvdev to $attachto."
- test_common "$poolcreate" "" "$attachto $attachvdev"
+ for retry in $(seq 1 5); do
+ test_common "$poolcreate" "" "$attachto $attachvdev" && return
+ log_note "Retry $retry / 5 for test_attach_vdev()"
+ done
+
+ log_fail "Exhausted all 5 retries for test_attach_vdev()"
}
function test_detach_vdev
{
typeset poolcreate="$1"
typeset detachvdev="$2"
log_note "$0: pool '$poolcreate', detach $detachvdev."
- test_common "$poolcreate" "" "" "$detachvdev"
+ for retry in $(seq 1 5); do
+ test_common "$poolcreate" "" "" "$detachvdev" && return
+ log_note "Retry $retry / 5 for test_detach_vdev()"
+ done
+
+ log_fail "Exhausted all 5 retries for test_detach_vdev()"
}
function test_attach_detach_vdev
{
typeset poolcreate="$1"
typeset attachto="$2"
typeset attachvdev="$3"
typeset detachvdev="$4"
log_note "$0: pool '$poolcreate', attach $attachvdev to $attachto," \
"then detach $detachvdev."
- test_common "$poolcreate" "" "$attachto $attachvdev" "$detachvdev"
+ for retry in $(seq 1 5); do
+ test_common "$poolcreate" "" "$attachto $attachvdev" \
+ "$detachvdev" && return
+ log_note "Retry $retry / 5 for test_attach_detach_vdev()"
+ done
+
+ log_fail "Exhausted all 5 retries for test_attach_detach_vdev()"
}
function test_remove_vdev
{
typeset poolcreate="$1"
typeset removevdev="$2"
typeset finalpool="$3"
log_note "$0: pool '$poolcreate', remove $removevdev."
- test_common "$poolcreate" "" "" "" "$removevdev" "$finalpool"
+ for retry in $(seq 1 5); do
+ test_common "$poolcreate" "" "" "" "$removevdev" \
+ "$finalpool" && return
+ log_note "Retry $retry / 5 for test_remove_vdev()"
+ done
+
+ log_fail "Exhausted all 5 retries for test_remove_vdev()"
}
# Record txg history
is_linux && log_must set_tunable32 TXG_HISTORY 100
# Make the devices bigger to reduce chances of overwriting MOS metadata.
increase_device_sizes $(( FILE_SIZE * 4 ))
# Increase the number of metaslabs for small pools temporarily to
# reduce the chance of reusing a metaslab that holds old MOS metadata.
log_must set_tunable64 VDEV_MIN_MS_COUNT 150
# Part of the rewind test is to see how it reacts to path changes
typeset pathstochange="$VDEV0 $VDEV1 $VDEV2 $VDEV3"
log_note " == test rewind after device addition == "
test_add_vdevs "$VDEV0" "$VDEV1"
test_add_vdevs "$VDEV0 $VDEV1" "$VDEV2"
test_add_vdevs "$VDEV0" "$VDEV1 $VDEV2"
test_add_vdevs "mirror $VDEV0 $VDEV1" "mirror $VDEV2 $VDEV3"
test_add_vdevs "$VDEV0" "raidz $VDEV1 $VDEV2 $VDEV3"
test_add_vdevs "$VDEV0" "draid $VDEV1 $VDEV2 $VDEV3"
test_add_vdevs "$VDEV0" "log $VDEV1"
test_add_vdevs "$VDEV0 log $VDEV1" "$VDEV2"
log_note " == test rewind after device attach == "
test_attach_vdev "$VDEV0" "$VDEV0" "$VDEV1"
test_attach_vdev "mirror $VDEV0 $VDEV1" "$VDEV0" "$VDEV2"
test_attach_vdev "$VDEV0 $VDEV1" "$VDEV0" "$VDEV2"
log_note " == test rewind after device removal == "
# Once we remove a device it will be overlooked in the device scan, so we must
# preserve its original path
pathstochange="$VDEV0 $VDEV2"
test_remove_vdev "$VDEV0 $VDEV1 $VDEV2" "$VDEV1" "$VDEV0 $VDEV2"
#
# Path change and detach are incompatible. Detach changes the guid of the vdev
# so we have no direct way to link the new path to an existing vdev.
#
pathstochange=""
log_note " == test rewind after device detach == "
test_detach_vdev "mirror $VDEV0 $VDEV1" "$VDEV1"
test_detach_vdev "mirror $VDEV0 $VDEV1 mirror $VDEV2 $VDEV3" "$VDEV1"
test_detach_vdev "$VDEV0 log mirror $VDEV1 $VDEV2" "$VDEV2"
log_note " == test rewind after device attach followed by device detach == "
#
# We need to disable vdev validation since once we detach VDEV1, VDEV0 will
# inherit the mirror tvd's guid and lose its original guid.
#
set_vdev_validate_skip 1
test_attach_detach_vdev "$VDEV0" "$VDEV0" "$VDEV1" "$VDEV1"
set_vdev_validate_skip 0
log_pass "zpool import rewind after configuration change passed."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
index 075ad85e9f96..3d142fdf70ca 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_reopen/zpool_reopen.shlib
@@ -1,124 +1,124 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. $STF_SUITE/tests/functional/cli_root/zpool_reopen/zpool_reopen.cfg
#
# Clear labels on the given disks
#
function clear_labels #disks
{
for disk in $@; do
if ( is_loop_device $disk ) || ( is_mpath_device $disk ); then
zpool labelclear -f /dev/$disk
else
zpool labelclear -f /dev/${disk}1
fi
done
}
#
# Set the REMOVED_DISK and REMOVED_DISK_ID constants for device
# used for re-plugging. When the disk is loop device use the
# scsi_debug emulated drive. Otherwise use the real drive.
#
function set_removed_disk
{
if is_loop_device $DISK1; then
export REMOVED_DISK=$(get_debug_device)
export REMOVED_DISK_ID=$(get_persistent_disk_name $REMOVED_DISK)
elif ( is_real_device $DISK1 ) || ( is_mpath_device $DISK1 ); then
export REMOVED_DISK="$DISK1"
export REMOVED_DISK_ID=${devs_id[0]}
else
log_fail "No drives that supports removal"
fi
}
#
# Generate random file of the given size in MiB
#
function generate_random_file #path size_mb
{
typeset path=$1
typeset -i size_mb=$2
file_write -o create -f $path -b 1048576 -s0 -c $size_mb -d R
}
#
# Wait until specific event or timeout occur.
#
# The passed function is executed with pool name as argument
# with an interval of 1 second until it succeeds or until the
# timeout occurs.
# It returns 1 on timeout or 0 otherwise.
#
function wait_for_action #pool timeout function
{
typeset pool=$1
typeset -i timeout=$2
- typeset func=$3
+ typeset funct=$3
while [ $timeout -gt 0 ]; do
(( --timeout ))
- if ( $func $pool ); then
+ if ( $funct $pool ); then
return 0
fi
sleep 1
done
return 1
}
#
# Helpers for wait_for_action function:
# wait_for_resilver_start - wait until resilver is started
# wait_for_resilver_end - wait until resilver is finished
# wait_for_scrub_end - wait until scrub is finished
#
function wait_for_resilver_start #pool timeout
{
wait_for_action $1 $2 is_pool_resilvering
return $?
}
function wait_for_resilver_end #pool timeout
{
wait_for_action $1 $2 is_pool_resilvered
return $?
}
function wait_for_scrub_end #pool timeout
{
wait_for_action $1 $2 is_pool_scrubbed
return $?
}
#
# Check if scan action has been restarted on the given pool
#
function is_scan_restarted #pool
{
typeset pool=$1
zpool history -i $pool | grep -q "scan aborted, restarting"
return $?
}
function is_deferred_scan_started #pool
{
typeset pool=$1
zpool history -i $pool | grep -q "starting deferred resilver"
return $?
}
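# An example use of the polling helpers above from a test that sources this
# library, wrapped in a function so that merely sourcing the file does not
# run it; the 60-second timeout is illustrative.
function example_wait_for_quiet_pool #pool
{
	typeset pool=$1
	if ! wait_for_resilver_end $pool 60; then
		log_fail "resilver of $pool did not finish within 60 seconds"
	fi
	if is_scan_restarted $pool; then
		log_note "a scan was restarted on $pool while waiting"
	fi
}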
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
index b0b03f5d523e..fd6e8c858edd 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
@@ -1,90 +1,90 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
# Use is subject to license terms.
#
# DESCRIPTION:
# Verify spa deadman detects a hung txg
#
# STRATEGY:
# 1. Reduce the zfs_deadman_synctime_ms to 5s.
# 2. Reduce the zfs_deadman_checktime_ms to 1s.
# 3. Inject a 10s zio delay to force long IOs.
# 4. Write enough data to force a long txg sync time due to the delay.
# 5. Verify a "deadman" event is posted.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/deadman/deadman.cfg
verify_runnable "both"
function cleanup
{
log_must zinject -c all
default_cleanup_noexit
log_must set_tunable64 DEADMAN_SYNCTIME_MS $SYNCTIME_DEFAULT
log_must set_tunable64 DEADMAN_CHECKTIME_MS $CHECKTIME_DEFAULT
log_must set_tunable64 DEADMAN_FAILMODE $FAILMODE_DEFAULT
}
log_assert "Verify spa deadman detects a hung txg"
log_onexit cleanup
log_must set_tunable64 DEADMAN_SYNCTIME_MS 5000
log_must set_tunable64 DEADMAN_CHECKTIME_MS 1000
log_must set_tunable64 DEADMAN_FAILMODE "wait"
# Create a new pool in order to use the updated deadman settings.
default_setup_noexit $DISK1
log_must zpool events -c
# Force each IO to take 10s by allowing them to run concurrently.
log_must zinject -d $DISK1 -D10000:10 $TESTPOOL
mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
log_must file_write -b 1048576 -c 8 -o create -d 0 -f $mntpnt/file
sleep 10
log_must zinject -c all
log_must zpool sync
# Log txg sync times for reference and the zpool event summary.
if is_freebsd; then
log_must sysctl -n kstat.zfs.$TESTPOOL.txgs
else
log_must cat /proc/spl/kstat/zfs/$TESTPOOL/txgs
fi
log_must zpool events
-# Verify at least 5 deadman events were logged. The first after 5 seconds,
+# Verify at least 4 deadman events were logged. The first after 5 seconds,
# and another each second thereafter until the delay is cleared.
events=$(zpool events | grep -c ereport.fs.zfs.deadman)
-if [ "$events" -lt 5 ]; then
+if [ "$events" -lt 4 ]; then
log_fail "Expect >=5 deadman events, $events found"
fi
log_pass "Verify spa deadman detected a hung txg and $events deadman events"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fallocate/fallocate_punch-hole.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fallocate/fallocate_punch-hole.ksh
index 6a8faa48704d..ed83561bd556 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fallocate/fallocate_punch-hole.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/fallocate/fallocate_punch-hole.ksh
@@ -1,97 +1,110 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
+# Copyright (c) 2021 by The FreeBSD Foundation.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
-# Test `fallocate --punch-hole`
+# Test hole-punching functionality
#
# STRATEGY:
# 1. Create a dense file
# 2. Punch an assortment of holes in the file and verify the result.
#
verify_runnable "global"
+#
+# Prior to __FreeBSD_version 1400032 there is no mechanism to punch a hole in
+# a file on FreeBSD. truncate(1) -d support is required to call fspacectl(2)
+# on behalf of the script.
+#
+if is_freebsd; then
+ if [[ $(uname -K) -lt 1400032 ]]; then
+ log_unsupported "Requires fspacectl(2) support on FreeBSD"
+ fi
+ if truncate -d 2>&1 | grep "illegal option" > /dev/null; then
+ log_unsupported "Requires truncate(1) -d support on FreeBSD"
+ fi
+fi
+
FILE=$TESTDIR/$TESTFILE0
BLKSZ=$(get_prop recordsize $TESTPOOL)
function cleanup
{
[[ -e $TESTDIR ]] && log_must rm -f $FILE
}
function check_disk_size
{
typeset expected_size=$1
disk_size=$(du $TESTDIR/file | awk '{print $1}')
if [ $disk_size -ne $expected_size ]; then
log_fail "Incorrect size: $disk_size != $expected_size"
fi
}
function check_apparent_size
{
typeset expected_size=$1
apparent_size=$(stat_size)
if [ $apparent_size -ne $expected_size ]; then
log_fail "Incorrect size: $apparent_size != $expected_size"
fi
}
log_assert "Ensure holes can be punched in files making them sparse"
log_onexit cleanup
# Create a dense file and check it is the correct size.
log_must file_write -o create -f $FILE -b $BLKSZ -c 8
log_must check_disk_size $((131072 * 8))
# Punch a hole for the first full block.
-log_must fallocate --punch-hole --offset 0 --length $BLKSZ $FILE
+log_must punch_hole 0 $BLKSZ $FILE
log_must check_disk_size $((131072 * 7))
# Partially punch a hole in the second block.
-log_must fallocate --punch-hole --offset $BLKSZ --length $((BLKSZ / 2)) $FILE
+log_must punch_hole $BLKSZ $((BLKSZ / 2)) $FILE
log_must check_disk_size $((131072 * 7))
# Punch a hole which overlaps the third and forth block.
-log_must fallocate --punch-hole --offset $(((BLKSZ * 2) + (BLKSZ / 2))) \
- --length $((BLKSZ)) $FILE
+log_must punch_hole $(((BLKSZ * 2) + (BLKSZ / 2))) $((BLKSZ)) $FILE
log_must check_disk_size $((131072 * 7))
# Punch a hole from the fifth block past the end of file. The apparent
# file size should not change since hole punching never changes the file's
# length (fallocate's --keep-size semantics).
apparent_size=$(stat_size $FILE)
-log_must fallocate --punch-hole --offset $((BLKSZ * 4)) \
- --length $((BLKSZ * 10)) $FILE
+log_must punch_hole $((BLKSZ * 4)) $((BLKSZ * 10)) $FILE
log_must check_disk_size $((131072 * 4))
log_must check_apparent_size $apparent_size
log_pass "Ensure holes can be punched in files making them sparse"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/history/history_006_neg.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/history/history_006_neg.ksh
index 19b7114faf5b..c3a5e092d02d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/history/history_006_neg.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/history/history_006_neg.ksh
@@ -1,86 +1,88 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/history/history_common.kshlib
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Verify the following zfs subcommands are not logged.
# list, get, holds, mount, unmount, share, unshare, send
#
# STRATEGY:
# 1. Create a test pool.
# 2. Separately invoke zfs list|get|holds|mount|unmount|share|unshare|send
# 3. Verify they were not recorded in pool history.
#
verify_runnable "global"
function cleanup
{
datasetexists $fs && destroy_dataset $fs -rf
log_must zfs create $fs
}
log_assert "Verify 'zfs list|get|holds|mount|unmount|share|unshare|send' " \
"will not be logged."
log_onexit cleanup
# Create initial test environment
fs=$TESTPOOL/$TESTFS; snap1=$fs@snap1; snap2=$fs@snap2
if ! is_linux; then
log_must zfs set sharenfs=on $fs
fi
log_must zfs snapshot $snap1
log_must zfs hold tag $snap1
log_must zfs snapshot $snap2
# Save initial TESTPOOL history
log_must eval "zpool history $TESTPOOL > $OLD_HISTORY"
log_must zfs list $fs > /dev/null
log_must zfs get mountpoint $fs > /dev/null
log_must zfs unmount $fs
log_must zfs mount $fs
if ! is_linux; then
log_must zfs share $fs
log_must zfs unshare $fs
fi
-log_must zfs send -i $snap1 $snap2 > /dev/null
+# https://github.com/openzfs/zfs/issues/11445
+set -o pipefail
+log_must zfs send -i $snap1 $snap2 | cat > /dev/null
log_must zfs holds $snap1
log_must eval "zpool history $TESTPOOL > $NEW_HISTORY"
log_must diff $OLD_HISTORY $NEW_HISTORY
log_must zfs release tag $snap1
log_pass "Verify 'zfs list|get|mount|unmount|share|unshare|send' passed."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/Makefile.am
index c2e42bc2ada4..31584fb17583 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/Makefile.am
@@ -1,11 +1,12 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/no_space
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
enospc_001_pos.ksh \
enospc_002_pos.ksh \
enospc_003_pos.ksh \
- enospc_df.ksh
+ enospc_df.ksh \
+ enospc_rm.ksh
dist_pkgdata_DATA = \
enospc.cfg
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_002_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_002_pos.ksh
index db6ee6ba7ca5..ffd14f4c550d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_002_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_002_pos.ksh
@@ -1,92 +1,89 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/no_space/enospc.cfg
#
# DESCRIPTION:
# After filling a filesystem, certain zfs commands are allowed.
#
verify_runnable "both"
function cleanup
{
log_must_busy zpool destroy -f $TESTPOOL
}
log_onexit cleanup
log_assert "ENOSPC is returned when file system is full."
default_setup_noexit $DISK_SMALL
log_must zfs set compression=off $TESTPOOL/$TESTFS
log_must zfs snapshot $TESTPOOL/$TESTFS@snap
#
# Completely fill the pool in order to ensure the commands below will more
# reliably succeed or fail as a result of lack of space. Care is taken to
# force multiple transaction groups to ensure as many recently freed blocks
# as possible are reallocated.
#
log_note "Writing files until ENOSPC."
-for i in $(seq 30); do
+for i in $(seq 100); do
file_write -o create -f $TESTDIR/file.$i -b $BLOCKSZ \
-c $NUM_WRITES -d $DATA
ret=$?
(( $ret != $ENOSPC )) && \
log_fail "file.$i returned: $ret rather than ENOSPC."
log_must zpool sync -f
done
log_mustnot_expect space zfs create $TESTPOOL/$TESTFS/subfs
log_mustnot_expect space zfs clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone
-log_mustnot_expect space zfs snapshot $TESTPOOL/$TESTFS@snap2
-log_mustnot_expect space zfs bookmark \
- $TESTPOOL/$TESTFS@snap $TESTPOOL/$TESTFS#bookmark
log_must zfs send $TESTPOOL/$TESTFS@snap > $TEST_BASE_DIR/stream.$$
log_mustnot_expect space zfs receive $TESTPOOL/$TESTFS/recvd < $TEST_BASE_DIR/stream.$$
log_must rm $TEST_BASE_DIR/stream.$$
log_must zfs rename $TESTPOOL/$TESTFS@snap $TESTPOOL/$TESTFS@snap_newname
log_must zfs rename $TESTPOOL/$TESTFS@snap_newname $TESTPOOL/$TESTFS@snap
log_must zfs rename $TESTPOOL/$TESTFS $TESTPOOL/${TESTFS}_newname
log_must zfs rename $TESTPOOL/${TESTFS}_newname $TESTPOOL/$TESTFS
log_must zfs allow staff snapshot $TESTPOOL/$TESTFS
log_must zfs unallow staff snapshot $TESTPOOL/$TESTFS
log_must zfs set user:prop=value $TESTPOOL/$TESTFS
log_must zfs set quota=1EB $TESTPOOL/$TESTFS
log_must zfs set quota=none $TESTPOOL/$TESTFS
log_must zfs set reservation=1KB $TESTPOOL/$TESTFS
log_must zfs set reservation=none $TESTPOOL/$TESTFS
log_must zpool scrub $TESTPOOL
zpool scrub -s $TESTPOOL
log_must zpool set comment="Use the force, Luke." $TESTPOOL
log_must zpool set comment="" $TESTPOOL
# destructive tests must come last
log_must zfs rollback $TESTPOOL/$TESTFS@snap
log_must zfs destroy $TESTPOOL/$TESTFS@snap
log_pass "ENOSPC returned as expected."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_rm.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_rm.ksh
new file mode 100755
index 000000000000..065abc75977e
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/no_space/enospc_rm.ksh
@@ -0,0 +1,60 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+# Copyright (c) 2022 by Lawrence Livermore National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/no_space/enospc.cfg
+
+#
+# DESCRIPTION:
+# After filling a filesystem, verify the contents can be removed
+# without encountering an ENOSPC error.
+#
+
+verify_runnable "both"
+
+function cleanup
+{
+ destroy_pool $TESTPOOL
+ log_must rm -f $all_vdevs
+}
+
+log_onexit cleanup
+
+log_assert "Files can be removed from full file system."
+
+all_vdevs=$(echo $TEST_BASE_DIR/file.{01..12})
+
+log_must truncate -s $MINVDEVSIZE $all_vdevs
+
+log_must zpool create -f $TESTPOOL draid2:8d:2s $all_vdevs
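+# draid2:8d:2s = dRAID with double parity, 8 data disks per redundancy
+# group, and 2 distributed spares spread across the 12 file vdevs.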
+log_must zfs create $TESTPOOL/$TESTFS
+log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
+log_must zfs set compression=off $TESTPOOL/$TESTFS
+
+log_note "Writing files until ENOSPC."
+log_mustnot_expect "No space left on device" fio --name=test \
+ --fallocate=none --rw=write --bs=1M --size=1G --numjobs=4 \
+ --sync=1 --directory=$TESTDIR/ --group_reporting
+
+log_must rm $TESTDIR/test.*
+log_must test -z "$(ls -A $TESTDIR)"
+
+log_pass "All files removed without error"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh
index fd7416612b7c..f326bf0c25d2 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_ro_rewind.ksh
@@ -1,57 +1,57 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib
#
# DESCRIPTION:
# Ensure that we can open the checkpointed state of a pool
# as read-only.
#
# STRATEGY:
# 1. Create pool
# 2. Populate it
# 3. Take checkpoint
# 4. Modify data (include at least one destructive change)
# 5. Export and import the checkpointed state as readonly
# 6. Verify that we can see the checkpointed state and not
# the actual current state.
# 7. Export and import the current state
# 8. Verify that we can see the current state and not the
# checkpointed state.
#
verify_runnable "global"
setup_test_pool
log_onexit cleanup_test_pool
populate_test_pool
log_must zpool checkpoint $TESTPOOL
test_change_state_after_checkpoint
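+# log_must_busy retries the exports below if they transiently fail with
+# a "busy" error, which can happen shortly after changing pool state.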
-log_must zpool export $TESTPOOL
+log_must_busy zpool export $TESTPOOL
log_must zpool import -o readonly=on --rewind-to-checkpoint $TESTPOOL
test_verify_pre_checkpoint_state "ro-check"
-log_must zpool export $TESTPOOL
+log_must_busy zpool export $TESTPOOL
log_must zpool import $TESTPOOL
test_verify_post_checkpoint_state
log_pass "Open checkpointed state of the pool as read-only pool."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib
index baee8269b1e1..9888034667a7 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib
@@ -1,380 +1,380 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/redundancy/redundancy.cfg
function cleanup
{
if poolexists $TESTPOOL; then
destroy_pool $TESTPOOL
fi
typeset dir
for dir in $TESTDIR $BASEDIR; do
if [[ -d $dir ]]; then
log_must rm -rf $dir
fi
done
}
#
# Get a random number between the minimum and maximum values.
#
# $1 Minimum value
# $2 Maximum value
#
function random
{
typeset -i min=$1
typeset -i max=$2
typeset -i value
while true; do
((value = RANDOM % (max + 1)))
if ((value >= min)); then
break
fi
done
echo $value
}
#
# Get the number of checksum errors for the pool.
#
# $1 Pool
#
function cksum_pool
{
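# Parse the `zpool status` output: the header line ending in "CKSUM"
# starts the vdev table, each following non-blank line is a pool or vdev
# row whose last field is its checksum error count, and a blank line
# ends the table.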
typeset -i cksum=$(zpool status $1 | awk '
!NF { isvdev = 0 }
isvdev { errors += $NF }
/CKSUM$/ { isvdev = 1 }
END { print errors }
')
echo $cksum
}
#
# Record the directory structure and checksum all the files which reside
# within the specified pool
#
# $1 The specified pool
# $2 The file in which to save the record.
#
function record_data
{
typeset pool=$1
typeset recordfile=$2
[[ -z $pool ]] && log_fail "No specified pool."
[[ -f $recordfile ]] && log_must rm -f $recordfile
sync_pool $pool
typeset mntpnt
mntpnt=$(get_prop mountpoint $pool)
log_must eval "du -a $mntpnt > $recordfile 2>&1"
#
# When the data is damaged, cksum fails and returns 1,
# so do not use log_must here.
#
find $mntpnt -type f -exec cksum {} + >> $recordfile 2>&1
}
#
# Create test pool and fill with files and directories.
#
# $1 pool name
# $2 pool type
# $3 number of virtual devices
#
function setup_test_env
{
typeset pool=$1
typeset keyword=$2
typeset -i vdev_cnt=$3
typeset vdevs
typeset -i i=0
while (( i < vdev_cnt )); do
vdevs="$vdevs $BASEDIR/vdev$i"
((i += 1))
done
if [[ ! -d $BASEDIR ]]; then
log_must mkdir $BASEDIR
fi
if poolexists $pool ; then
destroy_pool $pool
fi
log_must truncate -s $MINVDEVSIZE $vdevs
log_must zpool create -f -m $TESTDIR $pool $keyword $vdevs
log_note "Filling up the filesystem ..."
typeset -i ret=0
typeset -i i=0
typeset file=$TESTDIR/file
typeset -i limit
(( limit = $(get_prop available $pool) / 2 ))
while true ; do
[[ $(get_prop available $pool) -lt $limit ]] && break
file_write -o create -f $file.$i -b $BLOCKSZ -c $NUM_WRITES
ret=$?
(( $ret != 0 )) && break
(( i = i + 1 ))
done
record_data $TESTPOOL $PRE_RECORD_FILE
}
function refill_test_env
{
log_note "Re-filling the filesystem ..."
typeset pool=$1
typeset -i ret=0
typeset -i i=0
typeset mntpnt
mntpnt=$(get_prop mountpoint $pool)
typeset file=$mntpnt/file
while [[ -e $file.$i ]]; do
log_must rm -f $file.$i
file_write -o create -f $file.$i -b $BLOCKSZ -c $NUM_WRITES
ret=$?
(( $ret != 0 )) && break
(( i = i + 1 ))
done
record_data $TESTPOOL $PRE_RECORD_FILE
}
#
# Check pool status is healthy
#
# $1 pool
#
function is_healthy
{
typeset pool=$1
typeset healthy_output="pool '$pool' is healthy"
typeset real_output=$(zpool status -x $pool)
if [[ "$real_output" == "$healthy_output" ]]; then
return 0
else
typeset -i ret
zpool status -x $pool | grep "state:" | \
grep "FAULTED" >/dev/null 2>&1
ret=$?
(( $ret == 0 )) && return 1
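# Not FAULTED; pull the error count from the "scan:" line (the word
# after "with", e.g. "... with 0 errors on ...") and return it, so a
# completed scrub reporting 0 errors still counts as healthy.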
typeset l_scan
typeset errnum
l_scan=$(zpool status -x $pool | grep "scan:")
l_scan=${l_scan##*"with"}
errnum=$(echo $l_scan | awk '{print $1}')
return $errnum
fi
}
#
# Check pool data is valid
#
# $1 pool
#
function is_data_valid
{
typeset pool=$1
log_must zpool scrub -w $pool
record_data $pool $PST_RECORD_FILE
if ! diff $PRE_RECORD_FILE $PST_RECORD_FILE > /dev/null 2>&1; then
log_must cat $PRE_RECORD_FILE
log_must cat $PST_RECORD_FILE
diff -u $PRE_RECORD_FILE $PST_RECORD_FILE
return 1
fi
return 0
}
#
# Get the names of the specified number of devices
#
# $1 pool name
# $2 device count
#
function get_vdevs #pool cnt
{
typeset pool=$1
typeset -i cnt=$2
typeset all_devs=$(zpool iostat -v $pool | awk '{print $1}'| \
- egrep -v "^pool$|^capacity$|^mirror$|^raidz1$|^raidz2$|^raidz3$|^draid1.*|^draid2.*|^draid3.*|---" | \
+ egrep -v "^pool$|^capacity$|^mirror\-[0-9]$|^raidz[1-3]\-[0-9]$|^draid[1-3].*\-[0-9]$|---" | \
egrep -v "/old$|^$pool$")
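# all_devs now holds only leaf vdev names; take the first $cnt of them.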
typeset -i i=0
typeset vdevs
while ((i < cnt)); do
typeset dev=$(echo $all_devs | awk '{print $1}')
eval all_devs=\${all_devs##*$dev}
vdevs="$dev $vdevs"
((i += 1))
done
echo "$vdevs"
}
#
# Recreate the virtual device files and replace them in the pool under
# the same names
#
# $1 pool name
# $2-n virtual device files
#
function replace_missing_devs
{
typeset pool=$1
shift
typeset vdev
for vdev in $@; do
log_must dd if=/dev/zero of=$vdev \
bs=1024k count=$((MINVDEVSIZE / (1024 * 1024))) \
conv=fdatasync
log_must zpool replace -wf $pool $vdev $vdev
done
}
#
# Damage the pool's virtual device files.
#
# $1 pool name
# $2 Number of devices to damage
# $3 Damage method; if non-empty, the vdev labels are preserved
#
function damage_devs
{
typeset pool=$1
typeset -i cnt=$2
typeset label="$3"
typeset vdevs
typeset -i bs_count=$(((MINVDEVSIZE / 1024) - 4096))
vdevs=$(get_vdevs $pool $cnt)
typeset dev
if [[ -n $label ]]; then
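# Preserve the vdev labels: seek past the first 512 KiB, which holds
# the two front ZFS labels, before overwriting with zeros.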
for dev in $vdevs; do
log_must dd if=/dev/zero of=$dev seek=512 bs=1024 \
count=$bs_count conv=notrunc >/dev/null 2>&1
done
else
for dev in $vdevs; do
log_must dd if=/dev/zero of=$dev bs=1024 \
count=$bs_count conv=notrunc >/dev/null 2>&1
done
fi
sync_pool $pool
}
#
# Clear errors in the pool caused by data corruptions
#
# $1 pool name
#
function clear_errors
{
typeset pool=$1
log_must zpool clear $pool
if ! is_healthy $pool ; then
log_note "$pool should be healthy."
return 1
fi
if ! is_data_valid $pool ; then
log_note "Data should be valid in $pool."
return 1
fi
return 0
}
#
# Remove the specified pool's virtual device files
#
# $1 Pool name
# $2 Missing devices count
#
function remove_devs
{
typeset pool=$1
typeset -i cnt=$2
typeset vdevs
vdevs=$(get_vdevs $pool $cnt)
log_must rm -f $vdevs
sync_pool $pool
}
#
# Recover the bad or missing device files in the pool
#
# $1 Pool name
# $2 Missing devices count
#
function recover_bad_missing_devs
{
typeset pool=$1
typeset -i cnt=$2
typeset vdevs
vdevs=$(get_vdevs $pool $cnt)
replace_missing_devs $pool $vdevs
if ! is_healthy $pool ; then
log_note "$pool should be healthy."
return 1
fi
if ! is_data_valid $pool ; then
log_note "Data should be valid in $pool."
return 1
fi
return 0
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh
index 22891ef1d513..056c7916d990 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/refreserv/refreserv_raidz.ksh
@@ -1,135 +1,135 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2019 Joyent, Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/refreserv/refreserv.cfg
#
# DESCRIPTION:
# raidz refreservation=auto accounts for extra parity and skip blocks
#
# STRATEGY:
# 1. Create a pool with a single raidz vdev
# 2. For each block size [512b, 1k, 128k] or [4k, 8k, 128k]
# - create a volume
# - fully overwrite it
# - verify that referenced is less than or equal to reservation
# - destroy the volume
# 3. Destroy the pool
# 4. Recreate the pool with one more disk in the vdev, then repeat steps
# 2 and 3.
# 5. Repeat all steps above for raidz2 and raidz3.
#
# NOTES:
# 1. This test will use up to 14 disks but can cover the key concepts with
# 5 disks.
# 2. If the disks are a mixture of 4Kn and 512n/512e, failures are likely.
#
verify_runnable "global"
typeset -a alldisks=($DISKS)
# The larger the volsize, the better zvol_volsize_to_reservation() is at
# guessing the right number. At 10M on ashift=12, the estimate may be over 26%
# too high.
volsize=100
function cleanup
{
default_cleanup_noexit
default_setup_noexit "${alldisks[0]}"
}
log_assert "raidz refreservation=auto accounts for extra parity and skip blocks"
log_onexit cleanup
poolexists "$TESTPOOL" && log_must_busy zpool destroy "$TESTPOOL"
# Testing tiny block sizes on ashift=12 pools causes so much size inflation
# that small test disks may fill before creating small volumes. However,
# testing 512b and 1K blocks on ashift=9 pools is an ok approximation for
# testing the problems that arise from 4K and 8K blocks on ashift=12 pools.
if is_freebsd; then
bps=$(diskinfo -v ${alldisks[0]} | awk '/sectorsize/ { print $1 }')
elif is_linux; then
bps=$(lsblk -nrdo min-io /dev/${alldisks[0]})
fi
log_must test "$bps" -eq 512 -o "$bps" -eq 4096
case "$bps" in
512)
allshifts=(9 10 17)
maxpct=151
;;
4096)
allshifts=(12 13 17)
maxpct=110
;;
*)
log_fail "bytes/sector: $bps != (512|4096)"
;;
esac
log_note "Testing in ashift=${allshifts[0]} mode"
# This loop handles all iterations of steps 1 through 4 described in the
# strategy comment above.
for parity in 1 2 3; do
raid=raidz$parity
# Ensure we hit scenarios with and without skip blocks
for ndisks in $((parity * 2)) $((parity * 2 + 1)); do
typeset -a disks=(${alldisks[0..$((ndisks - 1))]})
if (( ${#disks[@]} < ndisks )); then
log_note "Too few disks to test $raid-$ndisks"
continue
fi
log_must zpool create "$TESTPOOL" "$raid" "${disks[@]}"
for bits in "${allshifts[@]}"; do
vbs=$((1 << bits))
log_note "Testing $raid-$ndisks volblocksize=$vbs"
vol=$TESTPOOL/$TESTVOL
log_must zfs create -V ${volsize}m \
-o volblocksize=$vbs "$vol"
block_device_wait "/dev/zvol/$vol"
log_must dd if=/dev/zero of=/dev/zvol/$vol \
bs=1024k count=$volsize
- sync
+ sync_pool $TESTPOOL
ref=$(zfs get -Hpo value referenced "$vol")
refres=$(zfs get -Hpo value refreservation "$vol")
log_must test -n "$ref"
log_must test -n "$refres"
typeset -F2 deltapct=$((refres * 100.0 / ref))
log_note "$raid-$ndisks refreservation $refres" \
"is $deltapct% of reservation $res"
log_must test "$ref" -le "$refres"
log_must test "$deltapct" -le $maxpct
log_must_busy zfs destroy "$vol"
block_device_wait
done
log_must_busy zpool destroy "$TESTPOOL"
done
done
log_pass "raidz refreservation=auto accounts for extra parity and skip blocks"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh
index c99a82c5db71..07da7e96306e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh
@@ -1,72 +1,72 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2018 Joyent, Inc.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/reservation/reservation.shlib
#
# DESCRIPTION:
#
# The use of refreservation=auto on a filesystem does not change the
# refreservation and results in an error.
#
# STRATEGY:
# 1) Create a filesystem
# 2) Verify that zfs set refreservation=auto fails without changing
# refreservation from none.
# 3) Set refreservation to a valid value.
# 4) Verify that zfs set refreservation=auto fails without changing
# refreservation from the previous value.
#
verify_runnable "both"
-fs=$TESTPOOL/$TESTFS/$(basename $0).$$
+fs=$TESTPOOL/$TESTFS/${0##*/}.$$
function cleanup
{
destroy_dataset "$fs" "-f"
}
log_onexit cleanup
log_assert "refreservation=auto on a filesystem generates an error without" \
"changing refreservation"
space_avail=$(get_prop available $TESTPOOL)
(( fs_size = space_avail / 4 ))
# Create a filesystem with no refreservation
log_must zfs create $fs
resv=$(get_prop refreservation $fs)
log_must test $resv -eq 0
# Verify that refreservation=auto fails without altering refreservation
log_mustnot zfs set refreservation=auto $fs
resv=$(get_prop refreservation $fs)
log_must test $resv -eq 0
# Set refreservation and verify
log_must zfs set refreservation=$fs_size $fs
resv=$(get_prop refreservation $fs)
log_must test $resv -eq $fs_size
# Verify that refreservation=auto fails without altering refreservation
log_mustnot zfs set refreservation=auto $fs
resv=$(get_prop refreservation $fs)
log_must test $resv -eq $fs_size
log_pass "refreservation=auto does not work on filesystems, as expected"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/Makefile.am
index 94bdd2674517..b8eb54f64c3e 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/Makefile.am
@@ -1,66 +1,68 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/rsend
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
recv_dedup.ksh \
recv_dedup_encrypted_zvol.ksh \
rsend_001_pos.ksh \
rsend_002_pos.ksh \
rsend_003_pos.ksh \
rsend_004_pos.ksh \
rsend_005_pos.ksh \
rsend_006_pos.ksh \
rsend_007_pos.ksh \
rsend_008_pos.ksh \
rsend_009_pos.ksh \
rsend_010_pos.ksh \
rsend_011_pos.ksh \
rsend_012_pos.ksh \
rsend_013_pos.ksh \
rsend_014_pos.ksh \
rsend_016_neg.ksh \
rsend_019_pos.ksh \
rsend_020_pos.ksh \
rsend_021_pos.ksh \
rsend_022_pos.ksh \
rsend_024_pos.ksh \
send_encrypted_files.ksh \
send_encrypted_hierarchy.ksh \
send_encrypted_props.ksh \
send_encrypted_truncated_files.ksh \
send-c_embedded_blocks.ksh \
send-c_incremental.ksh \
send-c_lz4_disabled.ksh \
send-c_mixed_compression.ksh \
send-c_props.ksh \
send-c_recv_dedup.ksh \
send-c_recv_lz4_disabled.ksh \
send-c_resume.ksh \
send-c_stream_size_estimate.ksh \
send-c_verify_contents.ksh \
send-c_verify_ratio.ksh \
send-c_volume.ksh \
send-c_zstreamdump.ksh \
send-cpL_varied_recsize.ksh \
send-L_toggle.ksh \
send_freeobjects.ksh \
send_partial_dataset.ksh \
send_realloc_dnode_size.ksh \
send_realloc_files.ksh \
send_realloc_encrypted_files.ksh \
send_spill_block.ksh \
+ send_raw_spill_block.ksh \
+ send_raw_ashift.ksh \
send_holds.ksh \
send_hole_birth.ksh \
send_invalid.ksh \
send_mixed_raw.ksh \
send-wR_encrypted_zvol.ksh \
send_doall.ksh
dist_pkgdata_DATA = \
dedup.zsend.bz2 \
dedup_encrypted_zvol.bz2 \
dedup_encrypted_zvol.zsend.bz2 \
fs.tar.gz \
rsend.cfg \
rsend.kshlib
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib
index d06bd39b4d49..516d41263294 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/rsend.kshlib
@@ -1,857 +1,865 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013, 2018 by Delphix. All rights reserved.
# Copyright (c) 2020 by Datto Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/math.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/rsend/rsend.cfg
#
# Set up test model which includes various datasets
#
# @final
# @snapB
# @init
# |
# ______ pclone
# | /
# |@psnap
# || @final
# ||@final @final @snapC
# ||@snapC @snapC @snapB
# ||@snapA @snapB @snapA
# ||@init @init @init
# ||| | |
# $pool -------- $FS ------- fs1 ------- fs2
# \ \\_____ \ |
# vol vol \____ \ @fsnap
# | | \ \ \
# @init @vsnap | ------------ fclone
# @snapA @init \ | |
# @final @snapB \ | @init
# @snapC vclone @snapA
# @final | @final
# @init
# @snapC
# @final
#
# $1 pool name
#
function setup_test_model
{
typeset pool=$1
log_must zfs create -p $pool/$FS/fs1/fs2
log_must zfs snapshot $pool@psnap
log_must zfs clone $pool@psnap $pool/pclone
if is_global_zone ; then
log_must zfs create -V 16M $pool/vol
log_must zfs create -V 16M $pool/$FS/vol
block_device_wait
log_must zfs snapshot $pool/$FS/vol@vsnap
log_must zfs clone $pool/$FS/vol@vsnap $pool/$FS/vclone
block_device_wait
fi
log_must snapshot_tree $pool/$FS/fs1/fs2@fsnap
log_must zfs clone $pool/$FS/fs1/fs2@fsnap $pool/$FS/fs1/fclone
log_must zfs snapshot -r $pool@init
log_must snapshot_tree $pool@snapA
log_must snapshot_tree $pool@snapC
log_must snapshot_tree $pool/pclone@snapB
log_must snapshot_tree $pool/$FS@snapB
log_must snapshot_tree $pool/$FS@snapC
log_must snapshot_tree $pool/$FS/fs1@snapA
log_must snapshot_tree $pool/$FS/fs1@snapB
log_must snapshot_tree $pool/$FS/fs1@snapC
log_must snapshot_tree $pool/$FS/fs1/fclone@snapA
if is_global_zone ; then
log_must zfs snapshot $pool/vol@snapA
log_must zfs snapshot $pool/$FS/vol@snapB
log_must zfs snapshot $pool/$FS/vol@snapC
log_must zfs snapshot $pool/$FS/vclone@snapC
fi
log_must zfs snapshot -r $pool@final
return 0
}
#
# Clean up BACKDIR and the given pool's contents, including all sub-datasets
#
# $1 pool name
#
function cleanup_pool
{
typeset pool=$1
log_must rm -rf $BACKDIR/*
if is_global_zone ; then
+ #
+ # Linux: Issuing a `df` seems to properly force any negative
+ # dcache entries to be invalidated preventing failures when
+ # accessing the mount point. Additional investigation required.
+ #
+ # https://github.com/openzfs/zfs/issues/6143
+ #
+ log_must df >/dev/null
log_must_busy zfs destroy -Rf $pool
else
typeset list=$(zfs list -H -r -t all -o name $pool)
for ds in $list ; do
if [[ $ds != $pool ]] ; then
if datasetexists $ds ; then
log_must_busy zfs destroy -Rf $ds
fi
fi
done
fi
typeset mntpnt=$(get_prop mountpoint $pool)
if ! ismounted $pool ; then
# Make sure mountpoint directory is empty
if [[ -d $mntpnt ]]; then
log_must rm -rf $mntpnt/*
fi
log_must zfs mount $pool
fi
if [[ -d $mntpnt ]]; then
rm -rf $mntpnt/*
fi
return 0
}
function cleanup_pools
{
cleanup_pool $POOL2
destroy_pool $POOL3
}
function cmp_md5s {
typeset file1=$1
typeset file2=$2
typeset sum1=$(md5digest $file1)
typeset sum2=$(md5digest $file2)
test "$sum1" = "$sum2"
}
#
# Detect whether the two given filesystems have the same sub-datasets
#
# $1 source filesystem
# $2 destination filesystem
#
function cmp_ds_subs
{
typeset src_fs=$1
typeset dst_fs=$2
zfs list -r -H -t all -o name $src_fs > $BACKDIR/src1
zfs list -r -H -t all -o name $dst_fs > $BACKDIR/dst1
eval sed -e 's:^$src_fs:PREFIX:g' < $BACKDIR/src1 > $BACKDIR/src
eval sed -e 's:^$dst_fs:PREFIX:g' < $BACKDIR/dst1 > $BACKDIR/dst
diff $BACKDIR/src $BACKDIR/dst
typeset -i ret=$?
rm -f $BACKDIR/src $BACKDIR/dst $BACKDIR/src1 $BACKDIR/dst1
return $ret
}
#
# Compare all the directories and files in two filesystems
#
# $1 source filesystem
# $2 destination filesystem
#
function cmp_ds_cont
{
typeset src_fs=$1
typeset dst_fs=$2
typeset srcdir dstdir
srcdir=$(get_prop mountpoint $src_fs)
dstdir=$(get_prop mountpoint $dst_fs)
diff -r $srcdir $dstdir > /dev/null 2>&1
return $?
}
#
# Compare the properties of the two given datasets
#
# $1 dataset 1
# $2 dataset 2
#
function cmp_ds_prop
{
typeset dtst1=$1
typeset dtst2=$2
typeset -a props=("type" "origin" "volblocksize" "acltype" "dnodesize" \
"atime" "canmount" "checksum" "compression" "copies" "devices" \
"exec" "quota" "readonly" "recordsize" "reservation" "setuid" \
"snapdir" "version" "volsize" "xattr" "mountpoint");
if is_freebsd; then
props+=("jailed")
else
props+=("zoned")
fi
for prop in $props;
do
zfs get -H -o property,value,source $prop $dtst1 >> \
$BACKDIR/dtst1
zfs get -H -o property,value,source $prop $dtst2 >> \
$BACKDIR/dtst2
done
eval sed -e 's:$dtst1:PREFIX:g' < $BACKDIR/dtst1 > $BACKDIR/dtst1
eval sed -e 's:$dtst2:PREFIX:g' < $BACKDIR/dtst2 > $BACKDIR/dtst2
diff $BACKDIR/dtst1 $BACKDIR/dtst2
typeset -i ret=$?
rm -f $BACKDIR/dtst1 $BACKDIR/dtst2
return $ret
}
#
# Randomly create directories and files
#
# $1 directory
#
function random_tree
{
typeset dir=$1
if [[ -d $dir ]]; then
rm -rf $dir
fi
mkdir -p $dir
typeset -i ret=$?
typeset -i nl nd nf
((nl = RANDOM % 6 + 1))
((nd = RANDOM % 3 ))
((nf = RANDOM % 5 ))
mktree -b $dir -l $nl -d $nd -f $nf
((ret |= $?))
return $ret
}
#
# Put data in filesystem and take snapshot
#
# $1 snapshot name
#
function snapshot_tree
{
typeset snap=$1
typeset ds=${snap%%@*}
typeset type=$(get_prop "type" $ds)
typeset -i ret=0
if [[ $type == "filesystem" ]]; then
typeset mntpnt=$(get_prop mountpoint $ds)
((ret |= $?))
if ((ret == 0)) ; then
eval random_tree $mntpnt/${snap##$ds}
((ret |= $?))
fi
fi
if ((ret == 0)) ; then
zfs snapshot $snap
((ret |= $?))
fi
return $ret
}
#
# Destroy the given snapshots and remove the directories created for them
# under the dataset mountpoint
#
# $1 snapshot
#
function destroy_tree
{
typeset -i ret=0
typeset snap
for snap in "$@" ; do
log_must_busy zfs destroy $snap
typeset ds=${snap%%@*}
typeset type=$(get_prop "type" $ds)
if [[ $type == "filesystem" ]]; then
typeset mntpnt=$(get_prop mountpoint $ds)
if [[ -n $mntpnt ]]; then
rm -rf $mntpnt/$snap
fi
fi
done
return 0
}
#
# Get all the sub-datasets of the given dataset with a specific suffix
#
# $1 Given dataset
# $2 Suffix
#
function getds_with_suffix
{
typeset ds=$1
typeset suffix=$2
typeset list=$(zfs list -r -H -t all -o name $ds | grep "$suffix$")
echo $list
}
#
# Output the properties that are both editable and inheritable for a
# file system
#
function fs_inherit_prop
{
typeset fs_prop
if is_global_zone ; then
fs_prop=$(zfs inherit 2>&1 | \
awk '$2=="YES" && $3=="YES" {print $1}')
if ! is_te_enabled ; then
fs_prop=$(echo $fs_prop | grep -v "mlslabel")
fi
else
fs_prop=$(zfs inherit 2>&1 | \
awk '$2=="YES" && $3=="YES" {print $1}'|
egrep -v "devices|mlslabel|sharenfs|sharesmb|zoned")
fi
echo $fs_prop
}
#
# Output the editable, inheritable properties for a volume
#
function vol_inherit_prop
{
echo "checksum readonly"
}
#
# Get the destination dataset to compare
#
function get_dst_ds
{
typeset srcfs=$1
typeset dstfs=$2
#
# If srcfs is not the pool itself, append its sub-path to dstfs
#
if ! zpool list $srcfs > /dev/null 2>&1 ; then
eval dstfs="$dstfs/${srcfs#*/}"
fi
echo $dstfs
}
#
# Make test files
#
# $1 Number of files to create
# $2 Maximum file size
# $3 File ID offset
# $4 File system to create the files on
#
function mk_files
{
nfiles=$1
maxsize=$2
file_id_offset=$3
fs=$4
bs=512
for ((i=0; i<$nfiles; i=i+1)); do
file_name="/$fs/file-$maxsize-$((i+$file_id_offset))"
file_size=$((($RANDOM * $RANDOM % ($maxsize - 1)) + 1))
#
# Create an interesting mix of files which contain both
# data blocks and holes for more realistic test coverage.
# Half the files are created sparse and then partially filled; the
# other half are created dense and then have a hole punched in them.
#
if [ $((RANDOM % 2)) -eq 0 ]; then
truncate -s $file_size $file_name || \
log_fail "Failed to create $file_name"
dd if=/dev/urandom of=$file_name \
bs=$bs count=$(($file_size / 2 / $bs)) \
seek=$(($RANDOM % (($file_size / 2 / $bs) + 1))) \
conv=notrunc >/dev/null 2>&1 || \
log_fail "Failed to create $file_name"
else
dd if=/dev/urandom of=$file_name \
bs=$file_size count=1 >/dev/null 2>&1 || \
log_fail "Failed to create $file_name"
dd if=/dev/zero of=$file_name \
bs=$bs count=$(($file_size / 2 / $bs)) \
seek=$(($RANDOM % (($file_size / 2 / $bs) + 1))) \
conv=notrunc >/dev/null 2>&1 || \
log_fail "Failed to create $file_name"
fi
done
echo Created $nfiles files of random sizes up to $maxsize bytes
}
#
# Remove test files
#
# $1 Number of files to remove
# $2 Maximum file size
# $3 File ID offset
# $4 File system to remove the files from
#
function rm_files
{
nfiles=$1
maxsize=$2
file_id_offset=$3
fs=$4
for ((i=0; i<$nfiles; i=i+1)); do
rm -f /$fs/file-$maxsize-$((i+$file_id_offset))
done
echo Removed $nfiles files of random sizes up to $maxsize bytes
}
#
# Simulate a random set of operations which could be reasonably expected
# to occur on an average filesystem.
#
# $1 Number of files to modify
# $2 Maximum file size
# $3 File system to modify the file on
# $4 Enabled xattrs (optional)
#
function churn_files
{
nfiles=$1
maxsize=$2
fs=$3
xattrs=${4:-1}
#
# Remove roughly half of the files in order to make it more
# likely that a dnode will be reallocated.
#
for ((i=0; i<$nfiles; i=i+1)); do
file_name="/$fs/file-$i"
if [[ -e $file_name ]]; then
if [ $((RANDOM % 2)) -eq 0 ]; then
rm $file_name || \
log_fail "Failed to remove $file_name"
fi
fi
done
#
# Remount the filesystem to simulate normal usage. This resets
# the last allocated object id, allowing new objects to be reallocated
# in the locations of previously freed objects.
#
log_must zfs unmount $fs
log_must zfs mount $fs
for i in {0..$nfiles}; do
file_name="/$fs/file-$i"
file_size=$((($RANDOM * $RANDOM % ($maxsize - 1)) + 1))
#
# When the file exists modify it in one of five ways to
# simulate normal usage:
# - (20%) Remove and set an extended attribute on the file
# - (20%) Overwrite the existing file
# - (20%) Truncate the existing file to a random length
# - (20%) Truncate the existing file to zero length
# - (20%) Remove the file
#
# Otherwise create the missing file. 20% of the created
# files will be small and use embedded block pointers, the
# remainder will have random sizes up to the maximum size.
# Three extended attributes are attached to all of the files.
#
if [[ -e $file_name ]]; then
value=$((RANDOM % 5))
if [ $value -eq 0 -a $xattrs -ne 0 ]; then
attrname="testattr$((RANDOM % 3))"
attrlen="$(((RANDOM % 1000) + 1))"
attrvalue="$(random_string VALID_NAME_CHAR \
$attrlen)"
rm_xattr $attrname $file_name || \
log_fail "Failed to remove $attrname"
set_xattr $attrname "$attrvalue" $file_name || \
log_fail "Failed to set $attrname"
elif [ $value -eq 1 ]; then
dd if=/dev/urandom of=$file_name \
bs=$file_size count=1 >/dev/null 2>&1 || \
log_fail "Failed to overwrite $file_name"
elif [ $value -eq 2 ]; then
truncate -s $file_size $file_name || \
log_fail "Failed to truncate $file_name"
elif [ $value -eq 3 ]; then
truncate -s 0 $file_name || \
log_fail "Failed to truncate $file_name"
else
rm $file_name || \
log_fail "Failed to remove $file_name"
fi
else
if [ $((RANDOM % 5)) -eq 0 ]; then
file_size=$((($RANDOM % 64) + 1))
fi
dd if=/dev/urandom of=$file_name \
bs=$file_size count=1 >/dev/null 2>&1 || \
log_fail "Failed to create $file_name"
if [ $xattrs -ne 0 ]; then
for j in {0..2}; do
attrname="testattr$j"
attrlen="$(((RANDOM % 1000) + 1))"
attrvalue="$(random_string \
VALID_NAME_CHAR $attrlen)"
set_xattr $attrname \
"$attrvalue" $file_name || \
log_fail "Failed to set $attrname"
done
fi
fi
done
return 0
}
#
# Mess up a send file's contents
#
# $1 The send file path
#
function mess_send_file
{
file=$1
filesize=$(stat_size $file)
offset=$(($RANDOM * $RANDOM % $filesize))
# The random offset might truncate the send stream to be
# smaller than the DRR_BEGIN record. If this happens, then
# the receiving system won't have enough info to create the
# partial dataset at all. We use zstream dump to check for
# this and retry in this case.
nr_begins=$(head -c $offset $file | zstream dump | \
grep DRR_BEGIN | awk '{ print $5 }')
while [ "$nr_begins" -eq 0 ]; do
offset=$(($RANDOM * $RANDOM % $filesize))
nr_begins=$(head -c $offset $file | zstream dump | \
grep DRR_BEGIN | awk '{ print $5 }')
done
if (($RANDOM % 7 <= 1)); then
#
# We corrupt 2 bytes to minimize the chance that we
# write the same value that's already there.
#
log_must eval "dd if=/dev/urandom of=$file conv=notrunc " \
"bs=1 count=2 seek=$offset >/dev/null 2>&1"
else
log_must truncate -s $offset $file
fi
}
#
# Diff the send/receive filesystems
#
# $1 The sent filesystem
# $2 The received filesystem
#
function file_check
{
sendfs=$1
recvfs=$2
if [[ -d /$recvfs/.zfs/snapshot/a && -d \
/$sendfs/.zfs/snapshot/a ]]; then
diff -r /$recvfs/.zfs/snapshot/a /$sendfs/.zfs/snapshot/a
[[ $? -eq 0 ]] || log_fail "Differences found in snap a"
fi
if [[ -d /$recvfs/.zfs/snapshot/b && -d \
/$sendfs/.zfs/snapshot/b ]]; then
diff -r /$recvfs/.zfs/snapshot/b /$sendfs/.zfs/snapshot/b
[[ $? -eq 0 ]] || log_fail "Differences found in snap b"
fi
}
#
# Resume test helper
#
# $1 The ZFS send command
# $2 The filesystem where the streams are sent
# $3 The receive filesystem
# $4 Test dry-run (optional)
#
function resume_test
{
typeset sendcmd=$1
typeset streamfs=$2
typeset recvfs=$3
typeset dryrun=${4:-1}
stream_num=1
log_must eval "$sendcmd >/$streamfs/$stream_num"
for ((i=0; i<2; i=i+1)); do
mess_send_file /$streamfs/$stream_num
log_mustnot zfs recv -suv $recvfs </$streamfs/$stream_num
stream_num=$((stream_num+1))
token=$(zfs get -Hp -o value receive_resume_token $recvfs)
# Do a dry-run
[ $dryrun -ne 0 ] && \
log_must eval "zfs send -nvt $token > /dev/null"
log_must eval "zfs send -t $token >/$streamfs/$stream_num"
[[ -f /$streamfs/$stream_num ]] || \
log_fail "NO FILE /$streamfs/$stream_num"
done
log_must zfs recv -suv $recvfs </$streamfs/$stream_num
}
function get_resume_token
{
sendcmd=$1
streamfs=$2
recvfs=$3
log_must eval "$sendcmd > /$streamfs/1"
mess_send_file /$streamfs/1
log_mustnot zfs recv -suv $recvfs < /$streamfs/1 2>&1
token=$(zfs get -Hp -o value receive_resume_token $recvfs)
echo "$token" > /$streamfs/resume_token
return 0
}
#
# Set up filesystems for the resumable send/receive tests
#
# $1 The "send" filesystem
# $2 The "recv" filesystem
#
function test_fs_setup
{
typeset sendfs=$1
typeset recvfs=$2
typeset streamfs=$3
typeset sendpool=${sendfs%%/*}
typeset recvpool=${recvfs%%/*}
datasetexists $sendfs && log_must_busy zfs destroy -r $sendpool
datasetexists $recvfs && log_must_busy zfs destroy -r $recvpool
datasetexists $streamfs && log_must_busy zfs destroy -r $streamfs
if datasetexists $sendfs || zfs create -o compress=lz4 $sendfs; then
mk_files 1000 256 0 $sendfs &
mk_files 1000 131072 0 $sendfs &
mk_files 100 1048576 0 $sendfs &
mk_files 10 10485760 0 $sendfs &
mk_files 1 104857600 0 $sendfs &
log_must wait
log_must zfs snapshot $sendfs@a
rm_files 200 256 0 $sendfs &
rm_files 200 131072 0 $sendfs &
rm_files 20 1048576 0 $sendfs &
rm_files 2 10485760 0 $sendfs &
log_must wait
mk_files 400 256 0 $sendfs &
mk_files 400 131072 0 $sendfs &
mk_files 40 1048576 0 $sendfs &
mk_files 4 10485760 0 $sendfs &
log_must wait
log_must zfs snapshot $sendfs@b
log_must eval "zfs send -v $sendfs@a >/$sendpool/initial.zsend"
log_must eval "zfs send -v -i @a $sendfs@b " \
">/$sendpool/incremental.zsend"
fi
log_must zfs create -o compress=lz4 $streamfs
}
#
# Check to see if the specified features are set in a send stream.
# The values for these features are found in include/sys/zfs_ioctl.h
#
# $1 The stream file
# $2-$n The flags expected in the stream
#
function stream_has_features
{
typeset file=$1
shift
[[ -f $file ]] || log_fail "Couldn't find file: $file"
typeset flags=$(cat $file | zstream dump | \
awk '/features =/ {features = $3} END {print features}')
typeset -A feature
feature[dedup]="1"
feature[dedupprops]="2"
feature[sa_spill]="4"
feature[embed_data]="10000"
feature[lz4]="20000"
feature[mooch_byteswap]="40000"
feature[large_blocks]="80000"
feature[resuming]="100000"
feature[redacted]="200000"
feature[compressed]="400000"
typeset flag known derived=0
for flag in "$@"; do
known=${feature[$flag]}
[[ -z $known ]] && log_fail "Unknown feature: $flag"
derived=$(printf "%x" $((0x${flags} & 0x${feature[$flag]})))
[[ $derived = $known ]] || return 1
done
return 0
}
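#
# Illustrative usage (not part of any test): check whether a stream
# generated with `zfs send -c` from a compressed dataset advertises the
# "compressed" feature flag:
#
#   log_must eval "zfs send -c $sendfs@snap > $BACKDIR/stream"
#   stream_has_features $BACKDIR/stream compressed || \
#       log_fail "expected the compressed feature flag in the stream"
#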
#
# Given a send stream, verify that the size of the stream matches what's
# expected based on the source or target dataset. If the stream is an
# incremental stream, subtract the size of the source snapshot before
# comparing. This function does not currently handle incremental streams
# that remove data.
#
# $1 The zstream dump output file
# $2 The dataset to compare against
# This can be a source of a send or recv target (fs, not snapshot)
# $3 The percentage below which verification is deemed a failure
# $4 The source snapshot of an incremental send
#
function verify_stream_size
{
typeset stream=$1
typeset ds=$2
typeset percent=${3:-90}
typeset inc_src=$4
[[ -f $stream ]] || log_fail "No such file: $stream"
datasetexists $ds || log_fail "No such dataset: $ds"
typeset stream_size=$(cat $stream | zstream dump | sed -n \
's/ Total payload size = \(.*\) (0x.*)/\1/p')
typeset inc_size=0
if [[ -n $inc_src ]]; then
inc_size=$(get_prop lrefer $inc_src)
if stream_has_features $stream compressed; then
inc_size=$(get_prop refer $inc_src)
fi
fi
if stream_has_features $stream compressed; then
ds_size=$(get_prop refer $ds)
else
ds_size=$(get_prop lrefer $ds)
fi
ds_size=$((ds_size - inc_size))
within_percent $stream_size $ds_size $percent || log_fail \
"$stream_size $ds_size differed by too much"
}
# Cleanup function for tests involving resumable send
function resume_cleanup
{
typeset sendfs=$1
typeset streamfs=$2
typeset sendpool=${sendfs%%/*}
datasetexists $sendfs && log_must_busy zfs destroy -r $sendfs
datasetexists $streamfs && log_must_busy zfs destroy -r $streamfs
cleanup_pool $POOL2
rm -f /$sendpool/initial.zsend /$sendpool/incremental.zsend
}
# Randomly set the property to one of the enumerated values.
function rand_set_prop
{
typeset dtst=$1
typeset prop=$2
shift 2
typeset value=$(random_get $@)
log_must eval "zfs set $prop='$value' $dtst"
}
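#
# Illustrative usage: randomly pick a compression algorithm for a dataset
# before generating a send stream, as some tests below do (requires
# properties.shlib for compress_prop_vals):
#
#   rand_set_prop $POOL/fs compression "${compress_prop_vals[@]}"
#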
# Generate a recursive checksum of a filesystem which includes the file
# contents and any associated extended attributes.
function recursive_cksum
{
case "$(uname)" in
FreeBSD)
find $1 -type f -exec sh -c 'sha256 -q {}; lsextattr -q \
system {} | sha256 -q; lsextattr -q user {} | sha256 -q' \
\; | sort | sha256 -q
;;
*)
find $1 -type f -exec sh -c 'sha256sum {}; getfattr \
--absolute-names --only-values -d {} | sha256sum' \; | \
sort -k 2 | awk '{ print $1 }' | sha256sum | \
awk '{ print $1 }'
;;
esac
}
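#
# Illustrative usage: compare a source filesystem against a received
# copy, mirroring how the send/recv tests below use this helper:
#
#   expected_cksum=$(recursive_cksum /$POOL/fs)
#   actual_cksum=$(recursive_cksum /$POOL/newfs)
#   [[ "$expected_cksum" == "$actual_cksum" ]] || \
#       log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
#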
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_verify_ratio.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_verify_ratio.ksh
index b7d978624f2b..845349a95873 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_verify_ratio.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send-c_verify_ratio.ksh
@@ -1,67 +1,67 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015, Delphix. All rights reserved.
# Copyright (c) 2019, Kjeld Schouten-Lebbing. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/include/properties.shlib
#
# Description:
# Verify that the amount of data in a send -c stream matches compressratio.
#
# Strategy:
# 1. For random compression types, and compressible / incompressible data:
# 2. Create a snap with data
# 3. Compare the size of the stream with the data on the dataset, adjusted
# by compressratio for normal send, and compared to used for send -c.
#
verify_runnable "both"
log_assert "Verify send -c streams are compressed"
log_onexit cleanup_pool $POOL2
typeset sendfs=$POOL2/$FS
-typeset megs=128
+typeset megs=64
for prop in "${compress_prop_vals[@]}"; do
for compressible in 'yes' 'no'; do
log_must zfs create -o compress=$prop $sendfs
if [[ $compressible = 'yes' ]]; then
write_compressible $(get_prop mountpoint $sendfs) \
${megs}m
else
typeset file="$(get_prop mountpoint $sendfs)/ddfile"
log_must dd if=/dev/urandom of=$file bs=1024k count=$megs
fi
log_must zfs snapshot $sendfs@snap
# Calculate the sizes and verify the compression ratio.
log_must eval "zfs send $sendfs@snap >$BACKDIR/uncompressed"
verify_stream_size $BACKDIR/uncompressed $sendfs
log_must eval "zfs send -c $sendfs@snap >$BACKDIR/compressed"
verify_stream_size $BACKDIR/compressed $sendfs
log_must rm $BACKDIR/uncompressed $BACKDIR/compressed
log_must_busy zfs destroy -r $sendfs
done
done
log_pass "Verify send -c streams are compressed"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_raw_ashift.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_raw_ashift.ksh
new file mode 100755
index 000000000000..3cea334495d9
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_raw_ashift.ksh
@@ -0,0 +1,193 @@
+#!/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2019, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2021, George Amanakis. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/include/properties.shlib
+. $STF_SUITE/tests/functional/rsend/rsend.kshlib
+
+#
+# Description:
+# Verify encrypted raw sending to pools with greater ashift succeeds.
+#
+# Strategy:
+# 1) Create a set of files each containing some file data in an
+# encrypted filesystem.
+# 2) Snapshot and raw send these files to a pool with greater ashift
+# 3) Verify that all the xattrs (and thus the spill block) were
+# preserved when receiving the incremental stream.
+# 4) Repeat the test for a non-encrypted filesystem using raw send
+#
+
+verify_runnable "both"
+
+log_assert "Verify raw sending to pools with greater ashift succeeds"
+
+function cleanup
+{
+ rm -f $BACKDIR/fs@*
+ poolexists pool9 && destroy_pool pool9
+ poolexists pool12 && destroy_pool pool12
+ log_must rm -f $TESTDIR/vdev_a $TESTDIR/vdev_b
+}
+
+function xattr_test
+{
+ log_must zfs set xattr=sa pool9/$1
+ log_must zfs set dnodesize=legacy pool9/$1
+ log_must zfs set recordsize=128k pool9/$1
+ rand_set_prop pool9/$1 compression "${compress_prop_vals[@]}"
+
+ # Create 40 files each with a spill block containing xattrs. Each file
+ # will be modified in a different way to validate the incremental receive.
+ for i in {1..40}; do
+ file="/pool9/$1/file$i"
+
+ log_must mkfile 16384 $file
+ for j in {1..20}; do
+ log_must set_xattr "testattr$j" "$attrvalue" $file
+ done
+ done
+
+ # Snapshot the pool and send it to the new dataset.
+ log_must zfs snapshot pool9/$1@snap1
+ log_must eval "zfs send -w pool9/$1@snap1 >$BACKDIR/$1@snap1"
+ log_must eval "zfs recv pool12/$1 < $BACKDIR/$1@snap1"
+
+ #
+ # Modify file[1-6]'s contents but not the spill blocks.
+ #
+ # file1 - Increase record size; single block
+ # file2 - Increase record size; multiple blocks
+ # file3 - Truncate file to zero size; single block
+ # file4 - Truncate file to smaller size; single block
+ # file5 - Truncate file to much larger size; add holes
+ # file6 - Truncate file to embedded size; embedded data
+ #
+ log_must mkfile 32768 /pool9/$1/file1
+ log_must mkfile 1048576 /pool9/$1/file2
+ log_must truncate -s 0 /pool9/$1/file3
+ log_must truncate -s 8192 /pool9/$1/file4
+ log_must truncate -s 1073741824 /pool9/$1/file5
+ log_must truncate -s 50 /pool9/$1/file6
+
+ #
+ # Modify file[11-16]'s contents and their spill blocks.
+ #
+ # file11 - Increase record size; single block
+ # file12 - Increase record size; multiple blocks
+ # file13 - Truncate file to zero size; single block
+ # file14 - Truncate file to smaller size; single block
+ # file15 - Truncate file to much larger size; add holes
+ # file16 - Truncate file to embedded size; embedded data
+ #
+ log_must mkfile 32768 /pool9/$1/file11
+ log_must mkfile 1048576 /pool9/$1/file12
+ log_must truncate -s 0 /pool9/$1/file13
+ log_must truncate -s 8192 /pool9/$1/file14
+ log_must truncate -s 1073741824 /pool9/$1/file15
+ log_must truncate -s 50 /pool9/$1/file16
+
+ for i in {11..20}; do
+ log_must rm_xattr testattr1 /pool9/$1/file$i
+ done
+
+ #
+ # Modify file[21-26]'s contents and remove their spill blocks.
+ #
+ # file21 - Increase record size; single block
+ # file22 - Increase record size; multiple blocks
+ # file23 - Truncate file to zero size; single block
+ # file24 - Truncate file to smaller size; single block
+ # file25 - Truncate file to much larger size; add holes
+ # file26 - Truncate file to embedded size; embedded data
+ #
+ log_must mkfile 32768 /pool9/$1/file21
+ log_must mkfile 1048576 /pool9/$1/file22
+ log_must truncate -s 0 /pool9/$1/file23
+ log_must truncate -s 8192 /pool9/$1/file24
+ log_must truncate -s 1073741824 /pool9/$1/file25
+ log_must truncate -s 50 /pool9/$1/file26
+
+ for i in {21..30}; do
+ for j in {1..20}; do
+ log_must rm_xattr testattr$j /pool9/$1/file$i
+ done
+ done
+
+ #
+ # Modify file[31-40]'s spill blocks but not the file contents.
+ #
+ for i in {31..40}; do
+ file="/pool9/$1/file$i"
+ log_must rm_xattr testattr$(((RANDOM % 20) + 1)) $file
+ log_must set_xattr testattr$(((RANDOM % 20) + 1)) "$attrvalue" $file
+ done
+
+ # Snapshot the pool and send the incremental snapshot.
+ log_must zfs snapshot pool9/$1@snap2
+ log_must eval "zfs send -w -i pool9/$1@snap1 pool9/$1@snap2 >$BACKDIR/$1@snap2"
+ log_must eval "zfs recv pool12/$1 < $BACKDIR/$1@snap2"
+}
+
+attrvalue="abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+log_onexit cleanup
+
+# Create pools
+truncate -s $MINVDEVSIZE $TESTDIR/vdev_a
+truncate -s $MINVDEVSIZE $TESTDIR/vdev_b
+log_must zpool create -f -o ashift=9 pool9 $TESTDIR/vdev_a
+log_must zpool create -f -o ashift=12 pool12 $TESTDIR/vdev_b
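+# pool9 (ashift=9) is the send side and pool12 (ashift=12) the receive
+# side, so every raw stream generated below must be receivable on a pool
+# with a larger minimum allocation size.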
+
+# Create encrypted fs
+log_must eval "echo 'password' | zfs create -o encryption=on" \
+ "-o keyformat=passphrase -o keylocation=prompt " \
+ "pool9/encfs"
+
+# Run xattr tests for encrypted fs
+xattr_test encfs
+
+# Calculate the expected recursive checksum for source encrypted fs
+expected_cksum=$(recursive_cksum /pool9/encfs)
+
+# Mount target encrypted fs
+log_must eval "echo 'password' | zfs load-key pool12/encfs"
+log_must zfs mount pool12/encfs
+
+# Validate the received copy using the received recursive checksum
+actual_cksum=$(recursive_cksum /pool12/encfs)
+if [[ "$expected_cksum" != "$actual_cksum" ]]; then
+ log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
+fi
+
+# Perform the same test but without encryption (send -w)
+log_must zfs create pool9/fs
+
+# Run xattr tests for non-encrypted fs
+xattr_test fs
+
+# Calculate the expected recursive checksum for source non-encrypted fs
+expected_cksum=$(recursive_cksum /pool9/fs)
+
+# Validate the received copy using the received recursive checksum
+actual_cksum=$(recursive_cksum /pool12/fs)
+if [[ "$expected_cksum" != "$actual_cksum" ]]; then
+ log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
+fi
+
+log_pass "Verify raw sending to pools with greater ashift succeeds"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_raw_spill_block.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_raw_spill_block.ksh
new file mode 100755
index 000000000000..8d7451ae62e2
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_raw_spill_block.ksh
@@ -0,0 +1,161 @@
+#!/bin/ksh
+
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2019, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2021, George Amanakis. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/rsend/rsend.kshlib
+
+#
+# Description:
+# Verify spill blocks are correctly preserved in raw sends.
+#
+# Strategy:
+# 1) Create a set of files each containing some file data in an
+# encrypted filesystem.
+# 2) Add enough xattrs to the file to require a spill block.
+# 3) Snapshot and raw send these files to a new dataset.
+# 4) Modify the files and spill blocks in a variety of ways.
+# 5) Send the changes using a raw incremental send stream.
+# 6) Verify that all the xattrs (and thus the spill block) were
+# preserved when receiving the incremental stream.
+#
+
+verify_runnable "both"
+
+log_assert "Verify spill blocks are correctly preserved in raw sends"
+
+function cleanup
+{
+ rm -f $BACKDIR/fs@*
+ destroy_dataset $POOL/fs "-rR"
+ destroy_dataset $POOL/newfs "-rR"
+}
+
+attrvalue="abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+log_onexit cleanup
+
+log_must eval "echo 'password' | zfs create -o encryption=on" \
+ "-o keyformat=passphrase -o keylocation=prompt " \
+ "$POOL/fs"
+log_must zfs set xattr=sa $POOL/fs
+log_must zfs set dnodesize=legacy $POOL/fs
+log_must zfs set recordsize=128k $POOL/fs
+
+# Create 40 files each with a spill block containing xattrs. Each file
+# will be modified in a different way to validate the incremental receive.
+for i in {1..40}; do
+ file="/$POOL/fs/file$i"
+
+ log_must mkfile 16384 $file
+ for j in {1..20}; do
+ log_must set_xattr "testattr$j" "$attrvalue" $file
+ done
+done
+
+# Snapshot the pool and send it to the new dataset.
+log_must zfs snapshot $POOL/fs@snap1
+log_must eval "zfs send -w $POOL/fs@snap1 >$BACKDIR/fs@snap1"
+log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap1"
+
+#
+# Modify file[1-6]'s contents but not the spill blocks.
+#
+# file1 - Increase record size; single block
+# file2 - Increase record size; multiple blocks
+# file3 - Truncate file to zero size; single block
+# file4 - Truncate file to smaller size; single block
+# file5 - Truncate file to much larger size; add holes
+# file6 - Truncate file to embedded size; embedded data
+#
+log_must mkfile 32768 /$POOL/fs/file1
+log_must mkfile 1048576 /$POOL/fs/file2
+log_must truncate -s 0 /$POOL/fs/file3
+log_must truncate -s 8192 /$POOL/fs/file4
+log_must truncate -s 1073741824 /$POOL/fs/file5
+log_must truncate -s 50 /$POOL/fs/file6
+
+#
+# Modify file[11-16]'s contents and their spill blocks.
+#
+# file11 - Increase record size; single block
+# file12 - Increase record size; multiple blocks
+# file13 - Truncate file to zero size; single block
+# file14 - Truncate file to smaller size; single block
+# file15 - Truncate file to much larger size; add holes
+# file16 - Truncate file to embedded size; embedded data
+#
+log_must mkfile 32768 /$POOL/fs/file11
+log_must mkfile 1048576 /$POOL/fs/file12
+log_must truncate -s 0 /$POOL/fs/file13
+log_must truncate -s 8192 /$POOL/fs/file14
+log_must truncate -s 1073741824 /$POOL/fs/file15
+log_must truncate -s 50 /$POOL/fs/file16
+
+for i in {11..20}; do
+ log_must rm_xattr testattr1 /$POOL/fs/file$i
+done
+
+#
+# Modify file[21-26]'s contents and remove their spill blocks.
+#
+# file21 - Increase record size; single block
+# file22 - Increase record size; multiple blocks
+# file23 - Truncate file to zero size; single block
+# file24 - Truncate file to smaller size; single block
+# file25 - Truncate file to much larger size; add holes
+# file26 - Truncate file to embedded size; embedded data
+#
+log_must mkfile 32768 /$POOL/fs/file21
+log_must mkfile 1048576 /$POOL/fs/file22
+log_must truncate -s 0 /$POOL/fs/file23
+log_must truncate -s 8192 /$POOL/fs/file24
+log_must truncate -s 1073741824 /$POOL/fs/file25
+log_must truncate -s 50 /$POOL/fs/file26
+
+for i in {21..30}; do
+ for j in {1..20}; do
+ log_must rm_xattr testattr$j /$POOL/fs/file$i
+ done
+done
+
+#
+# Modify file[31-40]'s spill blocks but not the file contents.
+#
+for i in {31..40}; do
+ file="/$POOL/fs/file$i"
+ log_must rm_xattr testattr$(((RANDOM % 20) + 1)) $file
+ log_must set_xattr testattr$(((RANDOM % 20) + 1)) "$attrvalue" $file
+done
+
+# Calculate the expected recursive checksum for the source.
+expected_cksum=$(recursive_cksum /$POOL/fs)
+
+# Snapshot the pool and send the incremental snapshot.
+log_must zfs snapshot $POOL/fs@snap2
+log_must eval "zfs send -w -i $POOL/fs@snap1 $POOL/fs@snap2 >$BACKDIR/fs@snap2"
+log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap2"
+log_must eval "echo 'password' | zfs load-key $POOL/newfs"
+log_must zfs mount $POOL/newfs
+
+# Validate the received copy using the received recursive checksum.
+actual_cksum=$(recursive_cksum /$POOL/newfs)
+if [[ "$expected_cksum" != "$actual_cksum" ]]; then
+ log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
+fi
+
+log_pass "Verify spill blocks are correctly preserved in raw sends"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh
index a653f8b3f15e..361f6b375ea1 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_encrypted_files.ksh
@@ -1,124 +1,120 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2019, Lawrence Livermore National Security LLC.
# Use is subject to license terms.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/properties.shlib
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify encrypted raw incremental receives handle dnode reallocation.
# Strategy:
# 1. Create a pool containing an encrypted filesystem.
# 2. Use 'zfs send -wp' to perform a raw send of the initial filesystem.
# 3. Repeat the following steps N times to verify raw incremental receives.
# a) Randomly change several key dataset properties.
# b) Modify the contents of the filesystem such that dnode reallocation
# is likely during the 'zfs receive', and receive_object() exercises
# as much of its functionality as possible.
#   c) Create a new snapshot and generate a raw incremental stream.
# d) Receive the raw incremental stream and verify the received contents.
# e) Destroy the incremental stream and old snapshot.
#
verify_runnable "both"
log_assert "Verify encrypted raw incremental receive handles reallocation"
function cleanup
{
rm -f $BACKDIR/fs@*
rm -f $keyfile
destroy_dataset $POOL/fs "-rR"
destroy_dataset $POOL/newfs "-rR"
}
log_onexit cleanup
typeset keyfile=/$TESTPOOL/pkey
# Create an encrypted dataset
log_must eval "echo 'password' > $keyfile"
log_must zfs create -o encryption=on -o keyformat=passphrase \
-o keylocation=file://$keyfile $POOL/fs
last_snap=1
log_must zfs snapshot $POOL/fs@snap${last_snap}
log_must eval "zfs send -wp $POOL/fs@snap${last_snap} \
>$BACKDIR/fs@snap${last_snap}"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap${last_snap}"
# Set atime=off to prevent the recursive_cksum from modifying newfs.
log_must zfs set atime=off $POOL/newfs
if is_kmemleak; then
# Use fewer files and passes on debug kernels
# to avoid timeout due to reduced performance.
nr_files=100
passes=2
-elif is_freebsd; then
- # Use fewer files and passes on FreeBSD to avoid timeout.
- nr_files=500
- passes=2
else
- nr_files=1000
+ nr_files=300
passes=3
fi
for i in {1..$passes}; do
# Randomly modify several dataset properties in order to generate
# more interesting incremental send streams.
rand_set_prop $POOL/fs checksum "off" "fletcher4" "sha256"
rand_set_prop $POOL/fs compression "${compress_prop_vals[@]}"
rand_set_prop $POOL/fs recordsize "32K" "128K"
rand_set_prop $POOL/fs dnodesize "legacy" "auto" "4k"
rand_set_prop $POOL/fs xattr "on" "sa"
# Churn the filesystem in such a way that we're likely to be both
# allocating and reallocating objects in the incremental stream.
log_must churn_files $nr_files 524288 $POOL/fs
expected_cksum=$(recursive_cksum /$POOL/fs)
# Create a snapshot and use it to send an incremental stream.
this_snap=$((last_snap + 1))
log_must zfs snapshot $POOL/fs@snap${this_snap}
log_must eval "zfs send -wp -i $POOL/fs@snap${last_snap} \
$POOL/fs@snap${this_snap} > $BACKDIR/fs@snap${this_snap}"
# Receive the incremental stream and verify the received contents.
log_must eval "zfs recv -Fu $POOL/newfs < $BACKDIR/fs@snap${this_snap}"
log_must zfs load-key $POOL/newfs
log_must zfs mount $POOL/newfs
actual_cksum=$(recursive_cksum /$POOL/newfs)
log_must zfs umount $POOL/newfs
log_must zfs unload-key $POOL/newfs
if [[ "$expected_cksum" != "$actual_cksum" ]]; then
log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
fi
# Destroy the incremental stream and old snapshot.
rm -f $BACKDIR/fs@snap${last_snap}
log_must zfs destroy $POOL/fs@snap${last_snap}
log_must zfs destroy $POOL/newfs@snap${last_snap}
last_snap=$this_snap
done
log_pass "Verify encrypted raw incremental receive handles reallocation"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh
index 083a2bec9daa..187a899a23c6 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/rsend/send_realloc_files.ksh
@@ -1,111 +1,107 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
#
. $STF_SUITE/include/properties.shlib
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify incremental receive properly handles reallocation.
#
# Strategy:
# 1. Create a pool containing a filesystem.
# 2. Use 'zfs send' to perform a send of the initial filesystem.
# 3. Repeat the following steps N times to verify incremental receives.
#   a) Randomly change several key dataset properties.
#   b) Modify the contents of the filesystem such that dnode reallocation
#      is likely during the 'zfs receive', and receive_object() exercises
#      as much of its functionality as possible.
#   c) Create a new snapshot and generate an incremental stream.
#   d) Receive the incremental stream and verify the received contents.
#   e) Destroy the incremental stream and old snapshot.
#
verify_runnable "both"
log_assert "Verify incremental receive handles reallocation"
function cleanup
{
rm -f $BACKDIR/fs@*
destroy_dataset $POOL/fs "-rR"
destroy_dataset $POOL/newfs "-rR"
}
log_onexit cleanup
log_must zfs create $POOL/fs
last_snap=1
log_must zfs snapshot $POOL/fs@snap${last_snap}
log_must eval "zfs send $POOL/fs@snap${last_snap} >$BACKDIR/fs@snap${last_snap}"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap${last_snap}"
# Set atime=off to prevent the recursive_cksum from modifying newfs.
log_must zfs set atime=off $POOL/newfs
if is_kmemleak; then
# Use fewer files and passes on debug kernels
# to avoid timeout due to reduced performance.
nr_files=100
passes=2
-elif is_freebsd; then
- # Use fewer passes and files on FreeBSD to avoid timeout.
- nr_files=500
- passes=2
else
- nr_files=1000
+ nr_files=300
passes=3
fi
for i in {1..$passes}; do
# Randomly modify several dataset properties in order to generate
# more interesting incremental send streams.
rand_set_prop $POOL/fs checksum "off" "fletcher4" "sha256"
rand_set_prop $POOL/fs compression "${compress_prop_vals[@]}"
rand_set_prop $POOL/fs recordsize "32K" "128K"
rand_set_prop $POOL/fs dnodesize "legacy" "auto" "4k"
rand_set_prop $POOL/fs xattr "on" "sa"
# Churn the filesystem in such a way that we're likely to be both
# allocating and reallocating objects in the incremental stream.
log_must churn_files $nr_files 524288 $POOL/fs
expected_cksum=$(recursive_cksum /$POOL/fs)
# Create a snapshot and use it to send an incremental stream.
this_snap=$((last_snap + 1))
log_must zfs snapshot $POOL/fs@snap${this_snap}
log_must eval "zfs send -i $POOL/fs@snap${last_snap} \
$POOL/fs@snap${this_snap} > $BACKDIR/fs@snap${this_snap}"
# Receive the incremental stream and verify the received contents.
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs@snap${this_snap}"
actual_cksum=$(recursive_cksum /$POOL/newfs)
if [[ "$expected_cksum" != "$actual_cksum" ]]; then
log_fail "Checksums differ ($expected_cksum != $actual_cksum)"
fi
# Destroy the incremental stream and old snapshot.
rm -f $BACKDIR/fs@snap${last_snap}
log_must zfs destroy $POOL/fs@snap${last_snap}
log_must zfs destroy $POOL/newfs@snap${last_snap}
last_snap=$this_snap
done
log_pass "Verify incremental receive handles dnode reallocation"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/simd/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/simd/Makefile.am
new file mode 100644
index 000000000000..bfc28868024a
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/simd/Makefile.am
@@ -0,0 +1,2 @@
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/simd
+dist_pkgdata_SCRIPTS = simd_supported.ksh
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/simd/simd_supported.ksh
similarity index 50%
copy from sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh
copy to sys/contrib/openzfs/tests/zfs-tests/tests/functional/simd/simd_supported.ksh
index dd01978619f9..d88bc582bf08 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/simd/simd_supported.ksh
@@ -1,52 +1,58 @@
-#! /bin/ksh -p
+#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
-# Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
+# Copyright (c) 2022 by Attila Fülöp <attila@fueloep.org>
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
-# Verify write(2) to regular file by non-owner.
-# Also see https://github.com/pjd/pjdfstest/blob/master/tests/chmod/12.t
+# Make sure we have SIMD support, so it will not go away without notice
#
# STRATEGY:
-# 1. creat(2) a file.
-# 2. write(2) to the file with uid=65534.
-# 3. stat(2) the file and verify .st_mode value.
-#
-
-verify_runnable "both"
-
-function cleanup
-{
- rm -f $TESTDIR/$TESTFILE0
-}
-
-log_onexit cleanup
-log_note "Verify write(2) to regular file by non-owner"
-
-log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "NONE"
-
-log_pass "Verify write(2) to regular file by non-owner passed"
+# 1. Test if we are running on a Linux x86 system with SSE support
+# 2. If so, check if the zfs_fletcher_4_impl module parameter contains
+#    an SSE implementation
+# 3. If not, fail the test; otherwise pass it
+
+log_note "Testing if we support SIMD instructions (Linux x86 only)"
+
+if ! is_linux; then
+ log_unsupported "Not a Linux System"
+fi
+
+case "$(uname -m)" in
+i386|i686|x86_64)
+ typeset -R modparam="/sys/module/zcommon/parameters/zfs_fletcher_4_impl"
+	if awk '/^flags/ {print; exit;}' /proc/cpuinfo | grep -q sse; then
+ log_must grep -q sse "$modparam"
+ log_pass "SIMD instructions supported"
+ else
+ log_unsupported "No FPU present"
+ fi
+ ;;
+*)
+	log_unsupported "Not an x86 CPU"
+ ;;
+esac
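+
+# For reference, the same module parameter can also be used to inspect or pin
+# the fletcher-4 implementation while debugging (illustrative commands only,
+# assuming a Linux system where the parameter is writable as documented for
+# OpenZFS):
+#
+#   cat /sys/module/zcommon/parameters/zfs_fletcher_4_impl
+#   echo sse2 > /sys/module/zcommon/parameters/zfs_fletcher_4_impl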
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh
index 59e7c110ddd7..036e71410c6d 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/snapshot/rollback_003_pos.ksh
@@ -1,111 +1,114 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_rollback/zfs_rollback_common.kshlib
. $STF_SUITE/tests/functional/snapshot/snapshot.cfg
#
# DESCRIPTION:
# Verify that rollbacks succeed when there are nested file systems.
#
# STRATEGY:
# 1) Snapshot an empty file system and rollback
# 2) Create a file in the file system
# 3) Rollback the file system to empty
# 4) Create a nested file system with the same name as the file created in (2)
# 5) Verify a rollback succeeds
#
verify_runnable "both"
function cleanup
{
typeset snap=""
typeset fs=""
export __ZFS_POOL_RESTRICT="$TESTPOOL"
log_must zfs mount -a
unset __ZFS_POOL_RESTRICT
for snap in "$SNAPPOOL.1" "$SNAPPOOL"; do
if snapexists $snap; then
destroy_snapshot $snap
fi
done
for fs in "$TESTPOOL/$TESTFILE/$TESTFILE.1" "$TESTPOOL/$TESTFILE"; do
if datasetexists $fs; then
destroy_dataset $fs -r
fi
done
[[ -e /$TESTPOOL ]] && \
log_must rm -rf $TESTPOOL/*
}
log_assert "Verify rollback succeeds when there are nested file systems."
log_onexit cleanup
log_must zfs snapshot $SNAPPOOL
log_must zfs rollback $SNAPPOOL
log_mustnot zfs snapshot $SNAPPOOL
log_must touch /$TESTPOOL/$TESTFILE
log_must zfs rollback $SNAPPOOL
log_must zfs create $TESTPOOL/$TESTFILE
log_must zfs rollback $SNAPPOOL
log_note "Verify rollback of multiple nested file systems succeeds."
log_must zfs snapshot $TESTPOOL/$TESTFILE@$TESTSNAP
log_must zfs snapshot $SNAPPOOL.1
+#
+# Linux: Issuing a `df` seems to properly force any negative dcache entries to
+# be invalidated, preventing failures when accessing the mount point. Additional
+# investigation is required.
+#
+# https://github.com/openzfs/zfs/issues/6143
+#
+log_must df >/dev/null
+
export __ZFS_POOL_RESTRICT="$TESTPOOL"
log_must zfs unmount -a
log_must zfs mount -a
unset __ZFS_POOL_RESTRICT
log_must touch /$TESTPOOL/$TESTFILE/$TESTFILE.1
log_must zfs rollback $SNAPPOOL.1
-
-#
-# Workaround for issue #6143. Issuing a `df` seems to properly force any
-# negative dcache entries to be invalidated preventing subsequent failures
-# when accessing the mount point. Additional investigation required.
-#
-log_must df
+log_must df >/dev/null
log_pass "Rollbacks succeed when nested file systems are present."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/Makefile.am
index 594d2b77ca8e..0145c1205fb3 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/Makefile.am
@@ -1,16 +1,17 @@
include $(top_srcdir)/config/Rules.am
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/suid
dist_pkgdata_SCRIPTS = \
suid_write_to_suid.ksh \
suid_write_to_sgid.ksh \
suid_write_to_suid_sgid.ksh \
suid_write_to_none.ksh \
+ suid_write_zil_replay.ksh \
cleanup.ksh \
setup.ksh
pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/suid
pkgexec_PROGRAMS = suid_write_to_file
suid_write_to_file_SOURCES = suid_write_to_file.c
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_file.c b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_file.c
index 571dc553bec2..f3febb903b59 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_file.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_file.c
@@ -1,133 +1,133 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
-
-static void
-test_stat_mode(mode_t extra)
-{
- struct stat st;
- int i, fd;
- char fpath[1024];
- char *penv[] = {"TESTDIR", "TESTFILE0"};
- char buf[] = "test";
- mode_t res;
- mode_t mode = 0777 | extra;
-
- /*
- * Get the environment variable values.
- */
- for (i = 0; i < sizeof (penv) / sizeof (char *); i++) {
- if ((penv[i] = getenv(penv[i])) == NULL) {
- fprintf(stderr, "getenv(penv[%d])\n", i);
- exit(1);
- }
- }
-
- umask(0);
- if (stat(penv[0], &st) == -1 && mkdir(penv[0], mode) == -1) {
- perror("mkdir");
- exit(2);
- }
-
- snprintf(fpath, sizeof (fpath), "%s/%s", penv[0], penv[1]);
- unlink(fpath);
- if (stat(fpath, &st) == 0) {
- fprintf(stderr, "%s exists\n", fpath);
- exit(3);
- }
-
- fd = creat(fpath, mode);
- if (fd == -1) {
- perror("creat");
- exit(4);
- }
- close(fd);
-
- if (setuid(65534) == -1) {
- perror("setuid");
- exit(5);
- }
-
- fd = open(fpath, O_RDWR);
- if (fd == -1) {
- perror("open");
- exit(6);
- }
-
- if (write(fd, buf, sizeof (buf)) == -1) {
- perror("write");
- exit(7);
- }
- close(fd);
-
- if (stat(fpath, &st) == -1) {
- perror("stat");
- exit(8);
- }
- unlink(fpath);
-
- /* Verify SUID/SGID are dropped */
- res = st.st_mode & (0777 | S_ISUID | S_ISGID);
- if (res != (mode & 0777)) {
- fprintf(stderr, "stat(2) %o\n", res);
- exit(9);
- }
-}
+#include <stdbool.h>
int
main(int argc, char *argv[])
{
- const char *name;
+ const char *name, *phase;
mode_t extra;
+ struct stat st;
- if (argc < 2) {
+ if (argc < 3) {
fprintf(stderr, "Invalid argc\n");
exit(1);
}
name = argv[1];
if (strcmp(name, "SUID") == 0) {
extra = S_ISUID;
} else if (strcmp(name, "SGID") == 0) {
extra = S_ISGID;
} else if (strcmp(name, "SUID_SGID") == 0) {
extra = S_ISUID | S_ISGID;
} else if (strcmp(name, "NONE") == 0) {
extra = 0;
} else {
fprintf(stderr, "Invalid name %s\n", name);
exit(1);
}
- test_stat_mode(extra);
+ const char *testdir = getenv("TESTDIR");
+ if (!testdir) {
+ fprintf(stderr, "getenv(TESTDIR)\n");
+ exit(1);
+ }
+
+ umask(0);
+ if (stat(testdir, &st) == -1 && mkdir(testdir, 0777) == -1) {
+ perror("mkdir");
+ exit(2);
+ }
+
+ char fpath[1024];
+ snprintf(fpath, sizeof (fpath), "%s/%s", testdir, name);
+
+
+ phase = argv[2];
+ if (strcmp(phase, "PRECRASH") == 0) {
+
+ /* clean up last run */
+ unlink(fpath);
+ if (stat(fpath, &st) == 0) {
+ fprintf(stderr, "%s exists\n", fpath);
+ exit(3);
+ }
+
+ int fd;
+
+ fd = creat(fpath, 0777 | extra);
+ if (fd == -1) {
+ perror("creat");
+ exit(4);
+ }
+ close(fd);
+
+ if (setuid(65534) == -1) {
+ perror("setuid");
+ exit(5);
+ }
+
+ fd = open(fpath, O_RDWR);
+ if (fd == -1) {
+ perror("open");
+ exit(6);
+ }
+
+ const char buf[] = "test";
+ if (write(fd, buf, sizeof (buf)) == -1) {
+ perror("write");
+ exit(7);
+ }
+ close(fd);
+
+ } else if (strcmp(phase, "REPLAY") == 0) {
+ /* created in PRECRASH run */
+ } else {
+ fprintf(stderr, "Invalid phase %s\n", phase);
+ exit(1);
+ }
+
+ if (stat(fpath, &st) == -1) {
+ perror("stat");
+ exit(8);
+ }
+
+ /* Verify SUID/SGID are dropped */
+ mode_t res = st.st_mode & (0777 | S_ISUID | S_ISGID);
+ if (res != 0777) {
+ fprintf(stderr, "stat(2) %o\n", res);
+ exit(9);
+ }
return (0);
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh
index dd01978619f9..470350f960cf 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_none.ksh
@@ -1,52 +1,52 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Verify write(2) to regular file by non-owner.
# Also see https://github.com/pjd/pjdfstest/blob/master/tests/chmod/12.t
#
# STRATEGY:
# 1. creat(2) a file.
# 2. write(2) to the file with uid=65534.
# 3. stat(2) the file and verify .st_mode value.
#
verify_runnable "both"
function cleanup
{
rm -f $TESTDIR/$TESTFILE0
}
log_onexit cleanup
log_note "Verify write(2) to regular file by non-owner"
-log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "NONE"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "NONE" "PRECRASH"
log_pass "Verify write(2) to regular file by non-owner passed"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_sgid.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_sgid.ksh
index 49ae2bd1b31e..3c95a402658e 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_sgid.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_sgid.ksh
@@ -1,52 +1,52 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Verify write(2) to SGID file by non-owner.
# Also see https://github.com/pjd/pjdfstest/blob/master/tests/chmod/12.t
#
# STRATEGY:
# 1. creat(2) a file with SGID.
# 2. write(2) to the file with uid=65534.
# 3. stat(2) the file and verify .st_mode value.
#
verify_runnable "both"
function cleanup
{
rm -f $TESTDIR/$TESTFILE0
}
log_onexit cleanup
log_note "Verify write(2) to SGID file by non-owner"
-log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SGID"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SGID" "PRECRASH"
log_pass "Verify write(2) to SGID file by non-owner passed"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid.ksh
index 3983aad2e51d..4183cbeefc20 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid.ksh
@@ -1,52 +1,52 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Verify write(2) to SUID file by non-owner.
# Also see https://github.com/pjd/pjdfstest/blob/master/tests/chmod/12.t
#
# STRATEGY:
# 1. creat(2) a file with SUID.
# 2. write(2) to the file with uid=65534.
# 3. stat(2) the file and verify .st_mode value.
#
verify_runnable "both"
function cleanup
{
rm -f $TESTDIR/$TESTFILE0
}
log_onexit cleanup
log_note "Verify write(2) to SUID file by non-owner"
-log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID" "PRECRASH"
log_pass "Verify write(2) to SUID file by non-owner passed"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid_sgid.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid_sgid.ksh
index a058c7e7d4bc..f7a08a55fc4b 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid_sgid.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_to_suid_sgid.ksh
@@ -1,52 +1,52 @@
#! /bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Verify write(2) to SUID/SGID file by non-owner.
# Also see https://github.com/pjd/pjdfstest/blob/master/tests/chmod/12.t
#
# STRATEGY:
# 1. creat(2) a file with SUID/SGID.
# 2. write(2) to the file with uid=65534.
# 3. stat(2) the file and verify .st_mode value.
#
verify_runnable "both"
function cleanup
{
rm -f $TESTDIR/$TESTFILE0
}
log_onexit cleanup
log_note "Verify write(2) to SUID/SGID file by non-owner"
-log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID_SGID"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID_SGID" "PRECRASH"
log_pass "Verify write(2) to SUID/SGID file by non-owner passed"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_zil_replay.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_zil_replay.ksh
new file mode 100755
index 000000000000..81f431f6b68b
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/suid/suid_write_zil_replay.ksh
@@ -0,0 +1,99 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+. $STF_SUITE/tests/functional/slog/slog.kshlib
+
+verify_runnable "global"
+
+function cleanup_fs
+{
+ cleanup
+}
+
+log_assert "Verify ZIL replay results in correct SUID/SGID bits for unprivileged write to SUID/SGID files"
+log_onexit cleanup_fs
+log_must setup
+
+#
+# 1. Create a file system (TESTFS)
+#
+log_must zpool destroy "$TESTPOOL"
+log_must zpool create $TESTPOOL $VDEV log mirror $LDEV
+log_must zfs set compression=on $TESTPOOL
+log_must zfs create -o mountpoint="$TESTDIR" $TESTPOOL/$TESTFS
+
+# Make all the writes from suid_write_to_file.c sync
+log_must zfs set sync=always "$TESTPOOL/$TESTFS"
+
+#
+# This dd command works around an issue where ZIL records aren't created
+# after freezing the pool unless a ZIL header already exists. Create a file
+# synchronously to force ZFS to write one out.
+#
+log_must dd if=/dev/zero of=$TESTDIR/sync \
+ conv=fdatasync,fsync bs=1 count=1
+
+#
+# 2. Freeze TESTFS
+#
+log_must zpool freeze $TESTPOOL
+
+#
+# 3. Unprivileged write to a setuid file
+#
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "NONE" "PRECRASH"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID" "PRECRASH"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SGID" "PRECRASH"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID_SGID" "PRECRASH"
+
+#
+# 4. Unmount filesystem and export the pool
+#
+# At this stage the on-disk state of TESTFS is unchanged and the pool is frozen;
+# the intent log contains a complete set of deltas to replay.
+#
+log_must zfs unmount $TESTPOOL/$TESTFS
+
+log_note "List transactions to replay:"
+log_must zdb -iv $TESTPOOL/$TESTFS
+
+log_must zpool export $TESTPOOL
+
+#
+# 5. Remount TESTFS <which replays the intent log>
+#
+# Import the pool to unfreeze it and claim log blocks. It has to be
+# `zpool import -f` because we can't write a frozen pool's labels!
+#
+log_must zpool import -f -d $VDIR $TESTPOOL
+
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "NONE" "REPLAY"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID" "REPLAY"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SGID" "REPLAY"
+log_must $STF_SUITE/tests/functional/suid/suid_write_to_file "SUID_SGID" "REPLAY"
+
+log_pass
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/Makefile.am
index 9100e4adadca..2c94d3e1521c 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/Makefile.am
@@ -1,28 +1,29 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/userquota
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
groupspace_001_pos.ksh \
groupspace_002_pos.ksh \
groupspace_003_pos.ksh \
userquota_001_pos.ksh \
userquota_002_pos.ksh \
userquota_003_pos.ksh \
userquota_004_pos.ksh \
userquota_005_neg.ksh \
userquota_006_pos.ksh \
userquota_007_pos.ksh \
userquota_008_pos.ksh \
userquota_009_pos.ksh \
userquota_010_pos.ksh \
userquota_011_pos.ksh \
userquota_012_neg.ksh \
userquota_013_pos.ksh \
userspace_001_pos.ksh \
userspace_002_pos.ksh \
userspace_003_pos.ksh \
- userspace_encrypted.ksh
+ userspace_encrypted.ksh \
+ userspace_send_encrypted.ksh
dist_pkgdata_DATA = \
userquota.cfg \
userquota_common.kshlib
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/userspace_send_encrypted.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/userspace_send_encrypted.ksh
new file mode 100755
index 000000000000..e9ef0c4262e7
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/userquota/userspace_send_encrypted.ksh
@@ -0,0 +1,119 @@
+#!/bin/ksh -p
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright 2021, George Amanakis <gamanakis@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/userquota/userquota_common.kshlib
+
+#
+# DESCRIPTION:
+# Sending raw encrypted datasets back to the source dataset succeeds.
+#
+#
+# STRATEGY:
+# 1. Create encrypted source dataset, set userquota and write a file
+# 2. Create base snapshot
+# 3. Write new file, snapshot, get userspace
+# 4. Raw send both snapshots
+# 5. Destroy latest snapshot at source and rollback
+# 6. Unmount, unload key from source
+# 7. Raw send latest snapshot back to source
+# 8. Mount both source and target datasets
+# 9. Verify encrypted datasets support 'zfs userspace' and 'zfs groupspace'
+# and the accounting is done correctly
+#
+
+function cleanup
+{
+ destroy_pool $POOLNAME
+ rm -f $FILEDEV
+}
+
+log_onexit cleanup
+
+FILEDEV="$TEST_BASE_DIR/userspace_encrypted"
+POOLNAME="testpool$$"
+ENC_SOURCE="$POOLNAME/source"
+ENC_TARGET="$POOLNAME/target"
+
+log_assert "Sending raw encrypted datasets back to the source dataset succeeds."
+
+# Setup pool and create source
+truncate -s 200m $FILEDEV
+log_must zpool create -o feature@encryption=enabled $POOLNAME \
+ $FILEDEV
+log_must eval "echo 'password' | zfs create -o encryption=on" \
+ "-o keyformat=passphrase -o keylocation=prompt " \
+ "$ENC_SOURCE"
+
+# Set user quota and write file
+log_must zfs set userquota@$QUSER1=50m $ENC_SOURCE
+mkmount_writable $ENC_SOURCE
+mntpnt=$(get_prop mountpoint $ENC_SOURCE)
+log_must user_run $QUSER1 mkfile 10m /$mntpnt/file1
+sync
+
+# Snapshot
+log_must zfs snap $ENC_SOURCE@base
+
+# Write new file, snapshot, get userspace
+log_must user_run $QUSER1 mkfile 20m /$mntpnt/file2
+log_must zfs snap $ENC_SOURCE@s1
+
+# Raw send both snapshots
+log_must eval "zfs send -w $ENC_SOURCE@base | zfs recv " \
+ "$ENC_TARGET"
+log_must eval "zfs send -w -i @base $ENC_SOURCE@s1 | zfs recv " \
+ "$ENC_TARGET"
+
+# Destroy latest snapshot at source and rollback
+log_must zfs destroy $ENC_SOURCE@s1
+log_must zfs rollback $ENC_SOURCE@base
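+# 'zfs userspace -Hp' prints exact byte counts; the awk below picks the
+# $QUSER1 row and converts its space-used figure to whole MiB for the
+# comparisons further down.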
+rollback_uspace=$(zfs userspace -Hp $ENC_SOURCE | \
+ awk "/$QUSER1/"' {printf "%d\n", $4 / 1024 / 1024}')
+
+# Unmount, unload key
+log_must zfs umount $ENC_SOURCE
+log_must zfs unload-key -a
+
+# Raw send latest snapshot back to source
+log_must eval "zfs send -w -i @base $ENC_TARGET@s1 | zfs recv " \
+ "$ENC_SOURCE"
+
+# Mount encrypted datasets and verify they support 'zfs userspace' and
+# 'zfs groupspace' and the accounting is done correctly
+log_must eval "echo 'password' | zfs load-key $ENC_SOURCE"
+log_must eval "echo 'password' | zfs load-key $ENC_TARGET"
+log_must zfs mount $ENC_SOURCE
+log_must zfs mount $ENC_TARGET
+sync
+
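+# Give the just-mounted datasets a moment for their userspace accounting to
+# settle before sampling it below.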
+sleep 5
+
+src_uspace=$(zfs userspace -Hp $ENC_SOURCE | \
+ awk "/$QUSER1/"' {printf "%d\n", $4 / 1024 / 1024}')
+tgt_uspace=$(zfs userspace -Hp $ENC_TARGET | \
+ awk "/$QUSER1/"' {printf "%d\n", $4 / 1024 / 1024}')
+log_must test "$src_uspace" -eq "$tgt_uspace"
+log_must test "$rollback_uspace" -ne "$src_uspace"
+
+src_uquota=$(zfs userspace -Hp $ENC_SOURCE | awk "/$QUSER1/"' {print $5}')
+tgt_uquota=$(zfs userspace -Hp $ENC_TARGET | awk "/$QUSER1/"' {print $5}')
+log_must test "$src_uquota" -eq "$tgt_uquota"
+
+# Cleanup
+cleanup
+
+log_pass "Sending raw encrypted datasets back to the source dataset succeeds."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
index 3ee09a151b12..b69d2ce02913 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
@@ -1,143 +1,142 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
#
# Wait for udev to settle, completely.
# This is quite discomforting, but there's a race condition here
# (Amazon 2015.09 x86_64 Release (TEST) is good at triggering this) where the
# kernel tries to remove zvol device nodes while they're open by [blkid],
# [zvol_id] or other udev related processes.
# Calling 'udevadm settle' is not enough: wait for those processes "manually".
#
function udev_wait
{
- sleep 1
- is_linux || return 0
udevadm trigger --action=change
udevadm settle
for i in {1..3}; do
blkid="$(pgrep blkid | wc -l)"
zvol_id="$(pgrep zvol_id | wc -l)"
[[ "0" == "$zvol_id" && "0" == "$blkid" ]] && return
udevadm settle
done
log_fail "Wait timeout reached for udev_wait"
}
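#
# Typical call site (illustrative only; the set_volmode() helper in
# zvol_misc_volmode.ksh follows this pattern): wait out udev before an
# operation that can remove in-use zvol minors, e.g.:
#
#   is_linux && udev_wait
#   log_must zfs set volmode=none $ZVOL
#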
#
# Clean up udev status
# This is also a problem on "Amazon 2015.09 x86_64 Release (TEST)" where udev,
# sometimes, does not clean up /dev/zvol symlinks correctly for removed ZVOLs.
# Prune those links manually, then tell udev to forget them.
#
function udev_cleanup
{
- is_linux || return 0
log_note "Pruning broken ZVOL symlinks ..."
udevadm settle
# find all dangling links and delete them
find -L "${ZVOL_DEVDIR}" -type l -print -delete
# purge those links from udev database
udevadm info --cleanup-db
}
#
# Verify $device exists and is a block device
#
function blockdev_exists # device
{
typeset device="$1"
# we wait here instead of doing it in a wrapper around 'zfs set snapdev'
# because there are other commands (zfs snap, zfs inherit, zfs destroy)
# that can affect device nodes
for i in {1..3}; do
- udev_wait
+ is_linux && udev_wait
+ block_device_wait "$device"
is_disk_device "$device" && return 0
done
log_fail "$device does not exist as a block device"
}
#
# Verify $device does not exist
#
function blockdev_missing # device
{
typeset device="$1"
# we wait here instead of doing it in a wrapper around 'zfs set snapdev'
# because there are other commands (zfs snap, zfs inherit, zfs destroy)
# that can affect device nodes
for i in {1..3}; do
- udev_wait
- [[ ! -e "$device" ]] && return 0
+ is_linux && udev_wait
+ block_device_wait
+ is_disk_device "$device" || return 0
done
log_fail "$device exists when not expected"
}
#
# Verify $property on $dataset is inherited by $parent and is set to $value
#
function verify_inherited # property value dataset parent
{
typeset property="$1"
typeset value="$2"
typeset dataset="$3"
typeset parent="$4"
typeset val=$(get_prop "$property" "$dataset")
typeset src=$(get_source "$property" "$dataset")
if [[ "$val" != "$value" || "$src" != "inherited from $parent" ]]; then
log_fail "Dataset $dataset did not inherit $property properly:"\
"expected=$value, value=$val, source=$src."
fi
}
#
# Create a small partition on $device, then verify if we can access it
#
function verify_partition # device
{
typeset device="$1"
if ! is_disk_device "$device"; then
log_fail "$device is not a block device"
fi
# create a small dummy partition
set_partition 0 "" 1m $device
# verify we can access the partition on the device
devname="$(readlink -f "$device")"
if is_linux || is_freebsd; then
is_disk_device "$devname""p1"
else
is_disk_device "$devname""s0"
fi
return $?
}
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh
index c4ea23c48d16..e9b7bbc4c4a5 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_rename_inuse.ksh
@@ -1,71 +1,71 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
. $STF_SUITE/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
#
# DESCRIPTION:
# Verify 'zfs rename' works on a ZVOL already in use as block device
#
# STRATEGY:
# 1. Create a ZVOL
# 2. Create a filesystem on the ZVOL device and mount it
# 3. Rename the ZVOL dataset
# 4. Receive a send stream with the same name as the old ZVOL dataset and verify
# we don't trigger any issue like the one reported in #6263
#
verify_runnable "global"
function cleanup
{
log_must umount "$MNTPFS"
log_must rmdir "$MNTPFS"
for ds in "$SENDFS" "$ZVOL" "$ZVOL-renamed"; do
destroy_dataset "$ds" '-rf'
done
- udev_wait
+ block_device_wait
}
log_assert "Verify 'zfs rename' works on a ZVOL already in use as block device"
log_onexit cleanup
ZVOL="$TESTPOOL/vol.$$"
ZDEV="$ZVOL_DEVDIR/$ZVOL"
MNTPFS="$TESTDIR/zvol_inuse_rename"
SENDFS="$TESTPOOL/sendfs.$$"
# 1. Create a ZVOL
log_must zfs create -V $VOLSIZE "$ZVOL"
# 2. Create a filesystem on the ZVOL device and mount it
-udev_wait
+block_device_wait "$ZDEV"
log_must eval "new_fs $ZDEV >/dev/null 2>&1"
log_must mkdir "$MNTPFS"
log_must mount "$ZDEV" "$MNTPFS"
# 3. Rename the ZVOL dataset
log_must zfs rename "$ZVOL" "$ZVOL-renamed"
# 4. Receive a send stream with the same name as the old ZVOL dataset and verify
# we don't trigger any issue like the one reported in #6263
log_must zfs create "$SENDFS"
log_must zfs snap "$SENDFS@snap"
log_must eval "zfs send $SENDFS@snap | zfs recv $ZVOL"
log_pass "Renaming in use ZVOL works successfully"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_snapdev.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_snapdev.ksh
index 1fd87aefdffc..9d3fb05da4b0 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_snapdev.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_snapdev.ksh
@@ -1,121 +1,121 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
. $STF_SUITE/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
#
# DESCRIPTION:
# Verify that ZFS volume property "snapdev" works as intended.
#
# STRATEGY:
# 1. Verify "snapdev" property does not accept invalid values
# 2. Verify "snapdev" adds and removes device nodes when updated
# 3. Verify "snapdev" is inherited correctly
#
verify_runnable "global"
function cleanup
{
datasetexists $VOLFS && destroy_dataset $VOLFS -r
datasetexists $ZVOL && destroy_dataset $ZVOL -r
log_must zfs inherit snapdev $TESTPOOL
block_device_wait
- udev_cleanup
+ is_linux && udev_cleanup
}
log_assert "Verify that ZFS volume property 'snapdev' works as expected."
log_onexit cleanup
VOLFS="$TESTPOOL/volfs"
ZVOL="$TESTPOOL/vol"
SNAP="$ZVOL@snap"
SNAPDEV="${ZVOL_DEVDIR}/$SNAP"
SUBZVOL="$VOLFS/subvol"
SUBSNAP="$SUBZVOL@snap"
SUBSNAPDEV="${ZVOL_DEVDIR}/$SUBSNAP"
log_must zfs create -o mountpoint=none $VOLFS
log_must zfs create -V $VOLSIZE -s $ZVOL
log_must zfs create -V $VOLSIZE -s $SUBZVOL
# 1. Verify "snapdev" property does not accept invalid values
typeset badvals=("off" "on" "1" "nope" "-")
for badval in ${badvals[@]}
do
log_mustnot zfs set snapdev="$badval" $ZVOL
done
# 2. Verify "snapdev" adds and removes device nodes when updated
# 2.1 First create a snapshot then change snapdev property
log_must zfs snapshot $SNAP
log_must zfs set snapdev=visible $ZVOL
blockdev_exists $SNAPDEV
log_must zfs set snapdev=hidden $ZVOL
blockdev_missing $SNAPDEV
log_must zfs destroy $SNAP
# 2.2 First set snapdev property then create a snapshot
log_must zfs set snapdev=visible $ZVOL
log_must zfs snapshot $SNAP
blockdev_exists $SNAPDEV
log_must zfs destroy $SNAP
blockdev_missing $SNAPDEV
# 2.3 Verify setting to the same value multiple times does not lead to issues
log_must zfs snapshot $SNAP
log_must zfs set snapdev=visible $ZVOL
blockdev_exists $SNAPDEV
log_must zfs set snapdev=visible $ZVOL
blockdev_exists $SNAPDEV
log_must zfs set snapdev=hidden $ZVOL
blockdev_missing $SNAPDEV
log_must zfs set snapdev=hidden $ZVOL
blockdev_missing $SNAPDEV
log_must zfs destroy $SNAP
# 3. Verify "snapdev" is inherited correctly
# 3.1 Check snapdev=visible case
log_must zfs snapshot $SNAP
log_must zfs inherit snapdev $ZVOL
log_must zfs set snapdev=visible $TESTPOOL
verify_inherited 'snapdev' 'visible' $ZVOL $TESTPOOL
blockdev_exists $SNAPDEV
# 3.2 Check snapdev=hidden case
log_must zfs set snapdev=hidden $TESTPOOL
verify_inherited 'snapdev' 'hidden' $ZVOL $TESTPOOL
blockdev_missing $SNAPDEV
# 3.3 Check inheritance on multiple levels
log_must zfs snapshot $SUBSNAP
log_must zfs inherit snapdev $SUBZVOL
log_must zfs set snapdev=hidden $VOLFS
log_must zfs set snapdev=visible $TESTPOOL
verify_inherited 'snapdev' 'hidden' $SUBZVOL $VOLFS
blockdev_missing $SUBSNAPDEV
blockdev_exists $SNAPDEV
log_pass "ZFS volume property 'snapdev' works as expected"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
index 322a31e07d89..af808dc30764 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
@@ -1,244 +1,259 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
. $STF_SUITE/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
#
# DESCRIPTION:
# Verify that ZFS volume property "volmode" works as intended.
#
# STRATEGY:
# 1. Verify "volmode" property does not accept invalid values
# 2. Verify "volmode=none" hides ZVOL device nodes
# 3. Verify "volmode=full" exposes a fully functional device
# 4. Verify "volmode=dev" hides partition info on the device
# 5. Verify "volmode=default" behaves according to the "volmode" module parameter
# 6. Verify "volmode" property is inherited correctly
# 7. Verify "volmode" behaves correctly at import time
# 8. Verify "volmode" behaves according to zvol_inhibit_dev (Linux only)
#
-# NOTE: changing volmode may need to remove minors, which could be open, so call
-# udev_wait() before we "zfs set volmode=<value>".
verify_runnable "global"
function cleanup
{
- datasetexists $VOLFS && log_must_busy zfs destroy -r $VOLFS
- datasetexists $ZVOL && log_must_busy zfs destroy -r $ZVOL
- log_must zfs inherit volmode $TESTPOOL
- udev_wait
+ datasetexists $VOLFS && destroy_dataset $VOLFS -r
+ datasetexists $ZVOL && destroy_dataset $ZVOL -r
+ zfs inherit volmode $TESTPOOL
sysctl_inhibit_dev 0
sysctl_volmode 1
- udev_cleanup
+ is_linux && udev_cleanup
}
#
# Set zvol_inhibit_dev tunable to $value
#
function sysctl_inhibit_dev # value
{
typeset value="$1"
if is_linux; then
log_note "Setting zvol_inhibit_dev tunable to $value"
log_must set_tunable32 VOL_INHIBIT_DEV $value
fi
}
#
# Set volmode tunable to $value
#
function sysctl_volmode # value
{
typeset value="$1"
log_note "Setting volmode tunable to $value"
log_must set_tunable32 VOL_MODE $value
}
#
# Exercise open and close, read and write operations
#
function test_io # dev
{
typeset dev=$1
- log_must dd if=/dev/zero of=$dev count=1
log_must dd if=$dev of=/dev/null count=1
+ log_must dd if=/dev/zero of=$dev count=1
+}
+
+#
+# Changing volmode may need to remove minors, which could be open, so call
+# udev_wait() before we "zfs set volmode=<value>". This ensures no udev
+# process has the zvol open (e.g. blkid) and the zvol_remove_minor_impl()
+# function won't skip removing the in-use device.
+#
+function set_volmode # value ds
+{
+ typeset value="$1"
+ typeset ds="$2"
+
+ is_linux && udev_wait
+ log_must zfs set volmode="$value" "$ds"
}
log_assert "Verify that ZFS volume property 'volmode' works as intended"
log_onexit cleanup
VOLFS="$TESTPOOL/volfs"
ZVOL="$TESTPOOL/vol"
-ZDEV="${ZVOL_DEVDIR}/$ZVOL"
+ZDEV="$ZVOL_DEVDIR/$ZVOL"
SUBZVOL="$VOLFS/subvol"
-SUBZDEV="${ZVOL_DEVDIR}/$SUBZVOL"
+SUBZDEV="$ZVOL_DEVDIR/$SUBZVOL"
+# 0. Verify basic ZVOL functionality
log_must zfs create -o mountpoint=none $VOLFS
log_must zfs create -V $VOLSIZE -s $SUBZVOL
log_must zfs create -V $VOLSIZE -s $ZVOL
-udev_wait
blockdev_exists $ZDEV
blockdev_exists $SUBZDEV
test_io $ZDEV
test_io $SUBZDEV
# 1. Verify "volmode" property does not accept invalid values
typeset badvals=("off" "on" "1" "nope" "-")
for badval in ${badvals[@]}
do
log_mustnot zfs set volmode="$badval" $ZVOL
done
# 2. Verify "volmode=none" hides ZVOL device nodes
-log_must zfs set volmode=none $ZVOL
+set_volmode none $ZVOL
blockdev_missing $ZDEV
log_must_busy zfs destroy $ZVOL
+blockdev_missing $ZDEV
# 3. Verify "volmode=full" exposes a fully functional device
log_must zfs create -V $VOLSIZE -s $ZVOL
-udev_wait
-log_must zfs set volmode=full $ZVOL
+blockdev_exists $ZDEV
+set_volmode full $ZVOL
blockdev_exists $ZDEV
test_io $ZDEV
log_must verify_partition $ZDEV
-udev_wait
# 3.1 Verify "volmode=geom" is an alias for "volmode=full"
-log_must zfs set volmode=geom $ZVOL
+set_volmode geom $ZVOL
blockdev_exists $ZDEV
if [[ "$(get_prop 'volmode' $ZVOL)" != "full" ]]; then
log_fail " Volmode value 'geom' is not an alias for 'full'"
fi
-udev_wait
log_must_busy zfs destroy $ZVOL
+blockdev_missing $ZDEV
# 4. Verify "volmode=dev" hides partition info on the device
log_must zfs create -V $VOLSIZE -s $ZVOL
-udev_wait
-log_must zfs set volmode=dev $ZVOL
+blockdev_exists $ZDEV
+set_volmode dev $ZVOL
blockdev_exists $ZDEV
test_io $ZDEV
log_mustnot verify_partition $ZDEV
-udev_wait
log_must_busy zfs destroy $ZVOL
+blockdev_missing $ZDEV
# 5. Verify "volmode=default" behaves according to the "volmode" module parameter
# 5.1 Verify sysctl "volmode=full"
sysctl_volmode 1
log_must zfs create -V $VOLSIZE -s $ZVOL
-udev_wait
-log_must zfs set volmode=default $ZVOL
+blockdev_exists $ZDEV
+set_volmode default $ZVOL
blockdev_exists $ZDEV
log_must verify_partition $ZDEV
-udev_wait
log_must_busy zfs destroy $ZVOL
+blockdev_missing $ZDEV
# 5.2 Verify sysctl "volmode=dev"
sysctl_volmode 2
log_must zfs create -V $VOLSIZE -s $ZVOL
-udev_wait
-log_must zfs set volmode=default $ZVOL
+blockdev_exists $ZDEV
+set_volmode default $ZVOL
blockdev_exists $ZDEV
log_mustnot verify_partition $ZDEV
-udev_wait
log_must_busy zfs destroy $ZVOL
+blockdev_missing $ZDEV
# 5.3 Verify sysctl "volmode=none"
sysctl_volmode 3
log_must zfs create -V $VOLSIZE -s $ZVOL
-udev_wait
-log_must zfs set volmode=default $ZVOL
+blockdev_missing $ZDEV
+set_volmode default $ZVOL
blockdev_missing $ZDEV
# 6. Verify "volmode" property is inherited correctly
log_must zfs inherit volmode $ZVOL
+blockdev_missing $ZDEV
# 6.1 Check volmode=full case
-log_must zfs set volmode=full $TESTPOOL
+set_volmode full $TESTPOOL
verify_inherited 'volmode' 'full' $ZVOL $TESTPOOL
blockdev_exists $ZDEV
# 6.2 Check volmode=none case
-log_must zfs set volmode=none $TESTPOOL
+set_volmode none $TESTPOOL
verify_inherited 'volmode' 'none' $ZVOL $TESTPOOL
blockdev_missing $ZDEV
# 6.3 Check volmode=dev case
-log_must zfs set volmode=dev $TESTPOOL
+set_volmode dev $TESTPOOL
verify_inherited 'volmode' 'dev' $ZVOL $TESTPOOL
blockdev_exists $ZDEV
# 6.4 Check volmode=default case
sysctl_volmode 1
-log_must zfs set volmode=default $TESTPOOL
+set_volmode default $TESTPOOL
verify_inherited 'volmode' 'default' $ZVOL $TESTPOOL
blockdev_exists $ZDEV
# 6.5 Check inheritance on multiple levels
log_must zfs inherit volmode $SUBZVOL
-udev_wait
-log_must zfs set volmode=none $VOLFS
-udev_wait
-log_must zfs set volmode=full $TESTPOOL
+set_volmode none $VOLFS
+set_volmode full $TESTPOOL
verify_inherited 'volmode' 'none' $SUBZVOL $VOLFS
blockdev_missing $SUBZDEV
blockdev_exists $ZDEV
# 7. Verify "volmode" behaves correctly at import time
log_must zpool export $TESTPOOL
blockdev_missing $ZDEV
blockdev_missing $SUBZDEV
log_must zpool import $TESTPOOL
blockdev_exists $ZDEV
blockdev_missing $SUBZDEV
log_must_busy zfs destroy $ZVOL
log_must_busy zfs destroy $SUBZVOL
+blockdev_missing $ZDEV
+blockdev_missing $SUBZDEV
# 8. Verify "volmode" behaves according to zvol_inhibit_dev (Linux only)
if is_linux; then
sysctl_inhibit_dev 1
	# 8.1 Verify device nodes are not created with "volmode=full"
sysctl_volmode 1
log_must zfs create -V $VOLSIZE -s $ZVOL
blockdev_missing $ZDEV
- log_must zfs set volmode=full $ZVOL
+ set_volmode full $ZVOL
blockdev_missing $ZDEV
log_must_busy zfs destroy $ZVOL
+ blockdev_missing $ZDEV
	# 8.2 Verify device nodes are not created with "volmode=dev"
sysctl_volmode 2
log_must zfs create -V $VOLSIZE -s $ZVOL
blockdev_missing $ZDEV
- log_must zfs set volmode=dev $ZVOL
+ set_volmode dev $ZVOL
blockdev_missing $ZDEV
log_must_busy zfs destroy $ZVOL
+ blockdev_missing $ZDEV
	# 8.3 Verify device nodes are not created with "volmode=none"
sysctl_volmode 3
log_must zfs create -V $VOLSIZE -s $ZVOL
blockdev_missing $ZDEV
- log_must zfs set volmode=none $ZVOL
+ set_volmode none $ZVOL
blockdev_missing $ZDEV
fi
log_pass "Verify that ZFS volume property 'volmode' works as intended"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_zil.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_zil.ksh
index b8989f478727..a393606d0fa9 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_zil.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_zil.ksh
@@ -1,74 +1,74 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
. $STF_SUITE/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
#
# DESCRIPTION:
# Verify ZIL functionality on ZVOLs
#
# STRATEGY:
# 1. Create ZVOLs with various combinations of "logbias" and "sync" values
# 2. Write data to ZVOL device node
# 3. Verify we don't trigger any issue like the one reported in #6238
#
verify_runnable "global"
function cleanup
{
- datasetexists $ZVOL && log_must_busy zfs destroy $ZVOL
- udev_wait
+ datasetexists $ZVOL && destroy_dataset $ZVOL
+ block_device_wait
}
log_assert "Verify ZIL functionality on ZVOLs"
log_onexit cleanup
ZVOL="$TESTPOOL/vol"
ZDEV="$ZVOL_DEVDIR/$ZVOL"
typeset -a logbias_prop_vals=('latency' 'throughput')
typeset -a sync_prop_vals=('standard' 'always' 'disabled')
for logbias in ${logbias_prop_vals[@]}; do
for sync in ${sync_prop_vals[@]}; do
# 1. Create a ZVOL with the current "logbias" and "sync" values
log_must zfs create -V $VOLSIZE -b 128K -o sync=$sync \
-o logbias=$logbias $ZVOL
# 2. Write data to its device node
for i in {1..50}; do
dd if=/dev/zero of=$ZDEV bs=8k count=1 &
done
# 3. Verify we don't trigger any issue
log_must wait
log_must_busy zfs destroy $ZVOL
done
done
log_pass "ZIL functionality works on ZVOLs"
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index 81f3202975b8..4217af1c2b03 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -1,342 +1,343 @@
# $FreeBSD$
SRCDIR=${SRCTOP}/sys/contrib/openzfs/module
INCDIR=${SRCTOP}/sys/contrib/openzfs/include
KMOD= zfs
.PATH: ${SRCDIR}/avl \
${SRCDIR}/lua \
${SRCDIR}/nvpair \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
${SRCDIR}/unicode \
${SRCDIR}/zcommon \
${SRCDIR}/zfs \
${SRCDIR}/zstd \
${SRCDIR}/zstd/lib
CFLAGS+= -I${INCDIR}
CFLAGS+= -I${INCDIR}/os/freebsd
CFLAGS+= -I${INCDIR}/os/freebsd/spl
CFLAGS+= -I${INCDIR}/os/freebsd/zfs
CFLAGS+= -I${SRCDIR}/zstd/include
CFLAGS+= -I${.CURDIR}
CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS \
-DHAVE_UIO_ZEROCOPY -DWITHOUT_NETDUMP -D__KERNEL -D_SYS_CONDVAR_H_ \
-D_SYS_VMEM_H_ -DIN_FREEBSD_BASE -DHAVE_KSID
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -DHAVE_AVX2 -DHAVE_AVX -D__x86_64 -DHAVE_SSE2 -DHAVE_AVX512F -DHAVE_AVX512BW -DHAVE_SSSE3
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
CFLAGS+= -DBITS_PER_LONG=32
.else
CFLAGS+= -DBITS_PER_LONG=64
.endif
SRCS= vnode_if.h device_if.h bus_if.h
# avl
SRCS+= avl.c
#lua
SRCS+= lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
#nvpair
SRCS+= nvpair.c \
fnvpair.c \
nvpair_alloc_spl.c \
nvpair_alloc_fixed.c
#os/freebsd/spl
SRCS+= acl_common.c \
btree.c \
callb.c \
list.c \
spl_acl.c \
spl_cmn_err.c \
spl_dtrace.c \
spl_kmem.c \
spl_kstat.c \
spl_misc.c \
spl_policy.c \
spl_string.c \
spl_sunddi.c \
spl_sysevent.c \
spl_taskq.c \
spl_uio.c \
spl_vfs.c \
spl_vm.c \
spl_zone.c \
sha256c.c \
sha512c.c \
spl_procfs_list.c \
spl_zlib.c
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
${MACHINE_ARCH} == "powerpcspe" || ${MACHINE_ARCH} == "arm"
SRCS+= spl_atomic.c
.endif
#os/freebsd/zfs
SRCS+= abd_os.c \
crypto_os.c \
dmu_os.c \
hkdf.c \
kmod_core.c \
spa_os.c \
sysctl_os.c \
vdev_file.c \
vdev_label_os.c \
vdev_geom.c \
zfs_acl.c \
zfs_ctldir.c \
zfs_dir.c \
zfs_ioctl_compat.c \
zfs_ioctl_os.c \
zfs_log.c \
zfs_racct.c \
zfs_replay.c \
zfs_vfsops.c \
zfs_vnops_os.c \
zfs_znode.c \
zio_crypt.c \
zvol_os.c
#unicode
SRCS+= uconv.c \
u8_textprep.c
#zcommon
SRCS+= zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zpool_prop.c \
zprop_common.c
#zfs
SRCS+= abd.c \
aggsum.c \
arc.c \
arc_os.c \
blkptr.c \
bplist.c \
bpobj.c \
cityhash.c \
dbuf.c \
dbuf_stats.c \
bptree.c \
bqueue.c \
dataset_kstats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_recv.c \
dmu_redact.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_bookmark.c \
dsl_dir.c \
dsl_crypt.c \
dsl_destroy.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_userhold.c \
fm.c \
gzip.c \
lzjb.c \
lz4.c \
metaslab.c \
mmp.c \
multilist.c \
objlist.c \
pathname.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha256.c \
skein_zfs.c \
spa.c \
spa_boot.c \
spa_checkpoint.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_log_spacemap.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
uberblock.c \
unique.c \
vdev.c \
vdev_cache.c \
vdev_draid.c \
vdev_draid_rand.c \
vdev_indirect.c \
vdev_indirect_births.c \
vdev_indirect_mapping.c \
vdev_initialize.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_raidz_math_avx2.c \
vdev_raidz_math_avx512bw.c \
vdev_raidz_math_avx512f.c \
vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
vdev_rebuild.c \
vdev_removal.c \
vdev_root.c \
vdev_trim.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_set.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_debug.c \
zfs_file_os.c \
zfs_fm.c \
zfs_fuid.c \
zfs_ioctl.c \
zfs_onexit.c \
zfs_quota.c \
zfs_ratelimit.c \
zfs_rlock.c \
zfs_sa.c \
zfs_vnops.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_inject.c \
zle.c \
zrlock.c \
zthr.c \
zvol.c
SRCS+= zfs_zstd.c \
zstd.c \
zstd_sparc.c
.include <bsd.kmod.mk>
CFLAGS+= -include ${SRCTOP}/sys/cddl/compat/opensolaris/sys/debug_compat.h
CFLAGS+= -include ${INCDIR}/os/freebsd/spl/sys/ccompile.h
CFLAGS+= -include ${SRCTOP}/sys/modules/zfs/static_ccompile.h
CWARNFLAGS+= ${OPENZFS_CWARNFLAGS}
CFLAGS.gcc+= -Wno-pointer-to-int-cast
CFLAGS.lapi.c= -Wno-cast-qual
CFLAGS.lcompat.c= -Wno-cast-qual
CFLAGS.lobject.c= -Wno-cast-qual
CFLAGS.ltable.c= -Wno-cast-qual
CFLAGS.lvm.c= -Wno-cast-qual
CFLAGS.nvpair.c= -Wno-cast-qual -DHAVE_RPC_TYPES
CFLAGS.spl_string.c= -Wno-cast-qual
CFLAGS.spl_vm.c= -Wno-cast-qual
CFLAGS.spl_zlib.c= -Wno-cast-qual
CFLAGS.abd.c= -Wno-cast-qual
+CFLAGS.arc.c= -Wno-unused-variable
CFLAGS.zfs_log.c= -Wno-cast-qual
CFLAGS.zfs_vnops_os.c= -Wno-pointer-arith
CFLAGS.u8_textprep.c= -Wno-cast-qual
CFLAGS.zfs_fletcher.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_intel.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_sse.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zfs_fletcher_avx512.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zprop_common.c= -Wno-cast-qual
CFLAGS.ddt.c= -Wno-cast-qual
CFLAGS.dmu.c= -Wno-cast-qual
CFLAGS.dmu_traverse.c= -Wno-cast-qual
CFLAGS.dsl_dir.c= -Wno-cast-qual
CFLAGS.dsl_deadlist.c= -Wno-cast-qual
CFLAGS.dsl_prop.c= -Wno-cast-qual
CFLAGS.fm.c= -Wno-cast-qual
CFLAGS.lz4.c= -Wno-cast-qual
CFLAGS.spa.c= -Wno-cast-qual
CFLAGS.spa_misc.c= -Wno-cast-qual
CFLAGS.sysctl_os.c= -include ${SRCTOP}/sys/modules/zfs/zfs_config.h
CFLAGS.vdev_draid.c= -Wno-cast-qual
CFLAGS.vdev_raidz.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_scalar.c= -Wno-cast-qual
CFLAGS.vdev_raidz_math_avx2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_avx512f.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.vdev_raidz_math_sse2.c= -Wno-cast-qual -Wno-duplicate-decl-specifier
CFLAGS.zap_leaf.c= -Wno-cast-qual
CFLAGS.zap_micro.c= -Wno-cast-qual
CFLAGS.zcp.c= -Wno-cast-qual
CFLAGS.zfs_fm.c= -Wno-cast-qual
CFLAGS.zfs_ioctl.c= -Wno-cast-qual
CFLAGS.zil.c= -Wno-cast-qual
CFLAGS.zio.c= -Wno-cast-qual
CFLAGS.zrlock.c= -Wno-cast-qual
CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zstd.c= -U__BMI__ -fno-tree-vectorize
.if ${MACHINE_CPUARCH} == "aarch64"
CFLAGS.zstd.c+= -include ${SRCDIR}/zstd/include/aarch64_compat.h
.endif
CFLAGS.zstd.c+= ${NO_WBITWISE_INSTEAD_OF_LOGICAL}
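As context for the module Makefile above, this is a rough sketch of the usual standalone build-and-load flow on FreeBSD; the /usr/src path and the zfs_enable rc knob are standard conventions and are not part of this change.
# Build and load zfs.ko from an installed source tree (paths are assumptions)
cd /usr/src/sys/modules/zfs
make
make install            # installs zfs.ko under ${KMODDIR} (typically /boot/kernel)
kldload zfs             # load the freshly built module
sysrc zfs_enable="YES"  # optionally have rc(8) load ZFS and mount pools at boot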
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index 2e5f26c8ce45..054aaeb3eb19 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,870 +1,915 @@
/*
* $FreeBSD$
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
+/* add_disk() returns int */
+/* #undef HAVE_ADD_DISK_RET */
+
/* Define if host toolchain supports AES */
#define HAVE_AES 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* block_device_operations->submit_bio() returns void */
/* #undef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_dev() is a macro */
/* #undef HAVE_BIO_SET_DEV_MACRO */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_get_by_path() handles ERESTARTSYS */
/* #undef HAVE_BLKDEV_GET_ERESTARTSYS */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_update_readahead() exists */
/* #undef HAVE_BLK_QUEUE_UPDATE_READAHEAD */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
+/* dequeue_signal() takes 4 arguments */
+/* #undef HAVE_DEQUEUE_SIGNAL_4ARG */
+
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* disk_update_readahead() exists */
/* #undef HAVE_DISK_UPDATE_READAHEAD */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
+/* FALLOC_FL_ZERO_RANGE is defined */
+/* #undef HAVE_FALLOC_FL_ZERO_RANGE */
+
+/* fault_in_iov_iter_readable() is available */
+/* #undef HAVE_FAULT_IN_IOV_ITER_READABLE */
+
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_acl() exists */
/* #undef HAVE_GET_ACL */
/* iops->get_acl() takes rcu */
/* #undef HAVE_GET_ACL_RCU */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter_type() is available */
/* #undef HAVE_IOV_ITER_TYPE */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* kernel has asm/fpu/xcr.h */
/* #undef HAVE_KERNEL_FPU_XCR_HEADER */
+/* kernel fpu and XSAVE internal */
+/* #undef HAVE_KERNEL_FPU_XSAVE_INTERNAL */
+
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() take loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* linux/blk-cgroup.h exists */
/* #undef HAVE_LINUX_BLK_CGROUP_HEADER */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Noting that make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* folio_wait_bit() exists */
/* #undef HAVE_PAGEMAP_FOLIO_WAIT_BIT */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and exists */
/* #undef HAVE_QAT */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* ->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* standalone <linux/stdarg.h> exists */
/* #undef HAVE_STANDALONE_LINUX_STDARG */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
+/* Define if host toolchain supports XSAVE */
+#define HAVE_XSAVE 1
+
+/* Define if host toolchain supports XSAVEOPT */
+#define HAVE_XSAVEOPT 1
+
+/* Define if host toolchain supports XSAVES */
+#define HAVE_XSAVES 1
+
/* Define if you have [z] */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
+/* TBD: fetch(3) support */
+#if 0
+/* whether the chosen libfetch is to be loaded at run-time */
+#define LIBFETCH_DYNAMIC 1
+
+/* libfetch is fetch(3) */
+#define LIBFETCH_IS_FETCH 1
+
+/* libfetch is libcurl */
+#define LIBFETCH_IS_LIBCURL 0
+
+/* soname of chosen libfetch */
+#define LIBFETCH_SONAME "libfetch.so.6"
+#endif
+
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* hardened module_param_call */
/* #undef MODULE_PARAM_CALL_CONST */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
+/* using complete_and_exit() instead */
+/* #undef SPL_KTHREAD_COMPLETE_AND_EXIT */
+
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
+/* pde_data() is PDE_DATA() */
+/* #undef SPL_PDE_DATA */
+
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.1.2-FreeBSD_gaf88d47f1"
+#define ZFS_META_ALIAS "zfs-2.1.3-FreeBSD_gef83e07db"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "5.15"
+#define ZFS_META_KVER_MAX "5.16"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_gaf88d47f1"
+#define ZFS_META_RELEASE "FreeBSD_gef83e07db"
/* Define the project version. */
-#define ZFS_META_VERSION "2.1.2"
+#define ZFS_META_VERSION "2.1.3"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
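zfs_config.h is generated by the OpenZFS configure script and then carried in-tree for the FreeBSD module build; the sketch below shows one assumed way a refreshed header could be produced from the vendored sources (--with-config is upstream's configure option, and this flow is an illustration, not the documented FreeBSD import procedure).
# Regenerate the configure output from the vendored OpenZFS tree (sketch only)
cd sys/contrib/openzfs
sh autogen.sh
./configure --with-config=kernel
# then reconcile the generated zfs_config.h with sys/modules/zfs/zfs_config.h by hand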
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index bafe6026a6a8..ce4a84b45e17 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1,5 +1,5 @@
/*
* $FreeBSD$
*/
-#define ZFS_META_GITREV "zfs-2.1.2-0-gaf88d47f1"
+#define ZFS_META_GITREV "zfs-2.1.3-0-gef83e07db"
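After the updated module and userland are installed, the new version string recorded above can be cross-checked at runtime; a small hedged example follows (the exact output shape may vary).
# Confirm the running bits match the imported zfs-2.1.3 revision
zfs version
# expected to report the userland and kernel module versions, e.g.
#   zfs-2.1.3-FreeBSD_gef83e07db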
